[llvm] d4299b3 - AMDGPU: Convert fcopysign tests to generated checks and add cases

Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Sat Jan 28 03:57:49 PST 2023


Author: Matt Arsenault
Date: 2023-01-28T07:57:28-04:00
New Revision: d4299b38256c597a9dd603dcceb2a64af0eb15dc

URL: https://github.com/llvm/llvm-project/commit/d4299b38256c597a9dd603dcceb2a64af0eb15dc
DIFF: https://github.com/llvm/llvm-project/commit/d4299b38256c597a9dd603dcceb2a64af0eb15dc.diff

LOG: AMDGPU: Convert fcopysign tests to generated checks and add cases

Added: 
    llvm/test/CodeGen/AMDGPU/fcopysign.f32.r600.ll

Modified: 
    llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
    llvm/test/CodeGen/AMDGPU/fcopysign.f32.ll
    llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll

Removed: 
    


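For readers skimming the generated checks below: the core lowering these tests exercise is copysign as a bitfield insert. v_bfi_b32 keeps the magnitude bits of the first operand and takes the sign bit from the second; VI/GFX9 do this directly on the 16-bit value with the mask 0x7fff, while SI, lacking 16-bit ALU ops, converts to f32 and uses the 32-bit mask 0x7fffffff (s_brev_b32 -2). A minimal sketch of that identity in Python -- names here are illustrative, not taken from the commit:

    MAG_MASK_F16 = 0x7fff  # s_movk_i32 s, 0x7fff in the VI/GFX9 checks

    def bfi(mask, a, b):
        # v_bfi_b32 semantics: bits of a where mask is set, bits of b elsewhere.
        return (a & mask) | (b & ~mask & 0xffffffff)

    def copysign_f16_bits(mag, sign):
        # Matches the "v_bfi_b32 v, 0x7fff, mag, sign" pattern in the checks.
        return bfi(MAG_MASK_F16, mag, sign)

    assert copysign_f16_bits(0x3c00, 0x8000) == 0xbc00  # copysign(1.0, -0.0) == -1.0
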
################################################################################
diff  --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
index 138f80dfec16..396ae55313ed 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -1,55 +1,698 @@
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,SI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GFX89 %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GFX89 %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=tahiti < %s | FileCheck -enable-var-scope --check-prefixes=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -enable-var-scope --check-prefixes=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 < %s | FileCheck -enable-var-scope --check-prefixes=GFX9 %s
 
-declare half @llvm.copysign.f16(half, half)
-declare float @llvm.copysign.f32(float, float)
-declare double @llvm.copysign.f64(double, double)
-declare <2 x half> @llvm.copysign.v2f16(<2 x half>, <2 x half>)
-declare <3 x half> @llvm.copysign.v3f16(<3 x half>, <3 x half>)
-declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
+declare half @llvm.copysign.f16(half, half) #0
+declare float @llvm.copysign.f32(float, float) #0
+declare double @llvm.copysign.f64(double, double) #0
+declare <2 x half> @llvm.copysign.v2f16(<2 x half>, <2 x half>) #0
+declare <3 x half> @llvm.copysign.v3f16(<3 x half>, <3 x half>) #0
+declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>) #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
 
-declare i32 @llvm.amdgcn.workitem.id.x()
-
-; GCN-LABEL: {{^}}test_copysign_f16:
-; SI: {{buffer|flat}}_load_ushort v[[MAG:[0-9]+]]
-; SI: {{buffer|flat}}_load_ushort v[[SIGN:[0-9]+]]
-; SI: s_brev_b32 s[[CONST:[0-9]+]], -2
-; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG]]
-; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
-; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN_F32]]
-; SI: v_cvt_f16_f32_e32 v[[OUT:[0-9]+]], v[[OUT_F32]]
-; GFX89: {{buffer|flat}}_load_ushort v[[MAG:[0-9]+]]
-; GFX89: {{buffer|flat}}_load_ushort v[[SIGN:[0-9]+]]
-; GFX89: s_movk_i32 s[[CONST:[0-9]+]], 0x7fff
-; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN]]
-; GCN: buffer_store_short v[[OUT]]
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_f16(
-  ptr addrspace(1) %arg_out,
-  ptr addrspace(1) %arg_mag,
-  ptr addrspace(1) %arg_sign) {
-entry:
-  %mag = load volatile half, ptr addrspace(1) %arg_mag
-  %sign = load volatile half, ptr addrspace(1) %arg_sign
+define amdgpu_kernel void @s_copysign_f16(ptr addrspace(1) %arg_out, half %mag, half %sign) {
+; SI-LABEL: s_copysign_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; SI-NEXT:    s_lshr_b32 s2, s2, 16
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, s2
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_bfi_b32 v0, s2, v0, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_copysign_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_movk_i32 s3, 0x7fff
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s4, s2, 16
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s4
+; VI-NEXT:    v_bfi_b32 v2, s3, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_copysign_f16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshr_b32 s1, s4, 16
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_mov_b32_e32 v2, s1
+; GFX9-NEXT:    v_bfi_b32 v1, s0, v1, v2
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
   %out = call half @llvm.copysign.f16(half %mag, half %sign)
   store half %out, ptr addrspace(1) %arg_out
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_out_f32_mag_f16_sign_f32:
-; GCN-DAG: {{buffer|flat|global}}_load_ushort v[[MAG:[0-9]+]]
-; GCN-DAG: {{buffer|flat|global}}_load_dword v[[SIGN:[0-9]+]]
-; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
-; GCN-DAG: v_cvt_f32_f16_e32 v[[MAG_EXT:[0-9]+]], v[[MAG]]
-; GCN: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG_EXT]], v[[SIGN]]
-; GCN: buffer_store_dword v[[OUT]]
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_out_f32_mag_f16_sign_f32(
-  ptr addrspace(1) %arg_out,
-  ptr addrspace(1) %arg_mag,
-  ptr addrspace(1) %arg_sign) {
-entry:
+define amdgpu_kernel void @s_test_copysign_f16_0(ptr addrspace(1) %out, half %mag) {
+; SI-LABEL: s_test_copysign_f16_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0x7fff
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s2, s2, 0x7fff
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_0:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_and_b32 s0, s4, 0x7fff
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half %mag, half 0.0)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f16_1(ptr addrspace(1) %out, half %mag) {
+; SI-LABEL: s_test_copysign_f16_1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0x7fff
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s2, s2, 0x7fff
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_and_b32 s0, s4, 0x7fff
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half %mag, half 1.0)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f16_10.0(ptr addrspace(1) %out, half %mag) {
+; SI-LABEL: s_test_copysign_f16_10.0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0x7fff
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_10.0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s2, s2, 0x7fff
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_10.0:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_and_b32 s0, s4, 0x7fff
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half %mag, half 10.0)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f16_neg1(ptr addrspace(1) %out, half %mag) {
+; SI-LABEL: s_test_copysign_f16_neg1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset1_b32 s4, 15
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_neg1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset1_b32 s2, 15
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_neg1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_or_b32 s0, s4, 0x8000
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half %mag, half -1.0)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f16_neg10(ptr addrspace(1) %out, half %mag) {
+; SI-LABEL: s_test_copysign_f16_neg10:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset1_b32 s4, 15
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_neg10:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset1_b32 s2, 15
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_neg10:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_or_b32 s0, s4, 0x8000
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half %mag, half -10.0)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f16_0_mag(ptr addrspace(1) %out, half %sign) {
+; SI-LABEL: s_test_copysign_f16_0_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_bfi_b32 v0, s2, 0, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_0_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0xffff8000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v2, s2, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_0_mag:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff8000
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half 0.0, half %sign)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+
+define amdgpu_kernel void @s_test_copysign_f16_1_mag(ptr addrspace(1) %out, half %sign) {
+; SI-LABEL: s_test_copysign_f16_1_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_bfi_b32 v0, s2, 1.0, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_1_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0xffff8000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, s2, v0
+; VI-NEXT:    v_or_b32_e32 v2, 0x3c00, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_1_mag:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff8000
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT:    v_or_b32_e32 v1, 0x3c00, v1
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half 1.0, half %sign)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f16_10_mag(ptr addrspace(1) %out, half %sign) {
+; SI-LABEL: s_test_copysign_f16_10_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    v_mov_b32_e32 v1, 0x41200000
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_bfi_b32 v0, s2, v1, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_10_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0xffff8000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, s2, v0
+; VI-NEXT:    v_or_b32_e32 v2, 0x4900, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_10_mag:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff8000
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT:    v_or_b32_e32 v1, 0x4900, v1
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half 10.0, half %sign)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f16_neg1_mag(ptr addrspace(1) %out, half %sign) {
+; SI-LABEL: s_test_copysign_f16_neg1_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_bfi_b32 v0, s2, -1.0, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_neg1_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0xffff8000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, s2, v0
+; VI-NEXT:    v_or_b32_e32 v2, 0x3c00, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_neg1_mag:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff8000
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT:    v_or_b32_e32 v1, 0x3c00, v1
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half -1.0, half %sign)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f16_neg10_mag(ptr addrspace(1) %out, half %sign) {
+; SI-LABEL: s_test_copysign_f16_neg10_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    v_mov_b32_e32 v1, 0xc1200000
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_bfi_b32 v0, s2, v1, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f16_neg10_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v0, 0xffff8000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, s2, v0
+; VI-NEXT:    v_or_b32_e32 v2, 0x4900, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_test_copysign_f16_neg10_mag:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff8000
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT:    v_or_b32_e32 v1, 0x4900, v1
+; GFX9-NEXT:    global_store_short v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
+  %result = call half @llvm.copysign.f16(half -10.0, half %sign)
+  store half %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define half @v_copysign_f16(half %mag, half %sign) {
+; SI-LABEL: v_copysign_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_brev_b32 s4, -2
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_copysign_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_movk_i32 s4, 0x7fff
+; VI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_copysign_f16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
+; GFX9-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = call half @llvm.copysign.f16(half %mag, half %sign)
+  ret half %result
+}
+
+define half @v_test_copysign_f16_0(half %mag) {
+; SI-LABEL: v_test_copysign_f16_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, |v0|
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_test_copysign_f16_0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_test_copysign_f16_0:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = call half @llvm.copysign.f16(half %mag, half 0.0)
+  ret half %result
+}
+
+define half @v_test_copysign_f16_1(half %mag) {
+; SI-LABEL: v_test_copysign_f16_1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, |v0|
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_test_copysign_f16_1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_test_copysign_f16_1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = call half @llvm.copysign.f16(half %mag, half 1.0)
+  ret half %result
+}
+
+define half @v_test_copysign_f16_10(half %mag) {
+; SI-LABEL: v_test_copysign_f16_10:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, |v0|
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_test_copysign_f16_10:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_test_copysign_f16_10:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = call half @llvm.copysign.f16(half %mag, half 10.0)
+  ret half %result
+}
+
+define half @v_test_copysign_f16_neg1(half %mag) {
+; SI-LABEL: v_test_copysign_f16_neg1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, -|v0|
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_test_copysign_f16_neg1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_or_b32_e32 v0, 0x8000, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_test_copysign_f16_neg1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v0, 0x8000, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = call half @llvm.copysign.f16(half %mag, half -1.0)
+  ret half %result
+}
+
+define half @v_test_copysign_f16_neg10(half %mag) {
+; SI-LABEL: v_test_copysign_f16_neg10:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, -|v0|
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_test_copysign_f16_neg10:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_or_b32_e32 v0, 0x8000, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_test_copysign_f16_neg10:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v0, 0x8000, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = call half @llvm.copysign.f16(half %mag, half -10.0)
+  ret half %result
+}
+
+define amdgpu_kernel void @v_copysign_out_f32_mag_f16_sign_f32(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
+; SI-LABEL: v_copysign_out_f32_mag_f16_sign_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s14, 0
+; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    v_mov_b32_e32 v2, 0
+; SI-NEXT:    buffer_load_ushort v3, v[1:2], s[12:15], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[14:15]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    buffer_load_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s8, s4
+; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_bfi_b32 v0, s0, v1, v0
+; SI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_copysign_out_f32_mag_f16_sign_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s7
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s6, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_ushort v2, v[1:2]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    s_brev_b32 s0, -2
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_waitcnt vmcnt(1)
+; VI-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_bfi_b32 v2, s0, v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: v_copysign_out_f32_mag_f16_sign_f32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NEXT:    s_brev_b32 s0, -2
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_ushort v1, v1, s[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v0, v0, s[2:3]
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v0, s0, v1, v0
+; GFX9-NEXT:    global_store_dword v2, v0, s[4:5]
+; GFX9-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %arg_mag_gep = getelementptr half, ptr addrspace(1) %arg_mag, i32 %tid
   %mag = load half, ptr addrspace(1) %arg_mag_gep
@@ -61,20 +704,76 @@ entry:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_out_f64_mag_f16_sign_f64:
-; GCN-DAG: {{buffer|flat|global}}_load_ushort v[[MAG:[0-9]+]]
-; GCN-DAG: {{buffer|flat|global}}_load_dwordx2 v[[[SIGN_LO:[0-9]+]]:[[SIGN_HI:[0-9]+]]]
-; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
-; GCN-DAG: v_cvt_f32_f16_e32 v[[MAG_EXT:[0-9]+]], v[[MAG]]
-; GCN-DAG: v_cvt_f64_f32_e32 v[[[MAG_EXT_LO:[0-9]+]]:[[MAG_EXT_HI:[0-9]+]]], v[[MAG_EXT]]
-; GCN: v_bfi_b32 v[[OUT_HI:[0-9]+]], s[[CONST]], v[[MAG_EXT_HI]], v[[SIGN_HI]]
-; GCN: buffer_store_dwordx2 v[[[MAG_EXT_LO]]:[[OUT_HI]]]
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_out_f64_mag_f16_sign_f64(
-  ptr addrspace(1) %arg_out,
-  ptr addrspace(1) %arg_mag,
-  ptr addrspace(1) %arg_sign) {
-entry:
+define amdgpu_kernel void @v_copysign_out_f64_mag_f16_sign_f64(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
+; SI-LABEL: v_copysign_out_f64_mag_f16_sign_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s14, 0
+; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    v_mov_b32_e32 v2, 0
+; SI-NEXT:    buffer_load_ushort v3, v[1:2], s[12:15], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[14:15]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[1:2], s[0:3], 0 addr64
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s8, s4
+; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v3
+; SI-NEXT:    v_cvt_f64_f32_e32 v[2:3], v0
+; SI-NEXT:    v_bfi_b32 v3, s0, v3, v1
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_copysign_out_f64_mag_f16_sign_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s7
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s6, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_ushort v2, v[1:2]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_brev_b32 s0, -2
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cvt_f32_f16_e32 v0, v2
+; VI-NEXT:    v_cvt_f64_f32_e32 v[2:3], v0
+; VI-NEXT:    v_bfi_b32 v3, s0, v3, v1
+; VI-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: v_copysign_out_f64_mag_f16_sign_f64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX9-NEXT:    s_brev_b32 s0, -2
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_ushort v2, v1, s[6:7]
+; GFX9-NEXT:    s_nop 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v0, s[2:3]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v0, v2
+; GFX9-NEXT:    v_cvt_f64_f32_e32 v[2:3], v0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_bfi_b32 v3, s0, v3, v1
+; GFX9-NEXT:    global_store_dwordx2 v0, v[2:3], s[4:5]
+; GFX9-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %arg_mag_gep = getelementptr half, ptr addrspace(1) %arg_mag, i32 %tid
   %mag = load half, ptr addrspace(1) %arg_mag_gep
@@ -86,21 +785,74 @@ entry:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_out_f32_mag_f32_sign_f16:
-; GCN-DAG: {{buffer|flat|global}}_load_dword v[[MAG:[0-9]+]]
-; GCN-DAG: {{buffer|flat|global}}_load_ushort v[[SIGN:[0-9]+]]
-; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
-; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
-; SI: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN_F32]]
-; GFX89-DAG: v_lshlrev_b32_e32 v[[SIGN_SHIFT:[0-9]+]], 16, v[[SIGN]]
-; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN_SHIFT]]
-; GCN: buffer_store_dword v[[OUT]]
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_out_f32_mag_f32_sign_f16(
-  ptr addrspace(1) %arg_out,
-  ptr addrspace(1) %arg_mag,
-  ptr addrspace(1) %arg_sign) {
-entry:
+define amdgpu_kernel void @v_copysign_out_f32_mag_f32_sign_f16(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
+; SI-LABEL: v_copysign_out_f32_mag_f32_sign_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s14, 0
+; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v2, 0
+; SI-NEXT:    buffer_load_dword v3, v[1:2], s[12:15], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[14:15]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    buffer_load_ushort v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s8, s4
+; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT:    v_bfi_b32 v0, s0, v3, v0
+; SI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_copysign_out_f32_mag_f32_sign_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_ushort v4, v[0:1]
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v2, v[0:1]
+; VI-NEXT:    s_brev_b32 s0, -2
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_waitcnt vmcnt(1)
+; VI-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_bfi_b32 v2, s0, v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: v_copysign_out_f32_mag_f32_sign_f16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NEXT:    s_brev_b32 s0, -2
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_ushort v1, v1, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_dword v0, v0, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v0, s0, v0, v1
+; GFX9-NEXT:    global_store_dword v2, v0, s[4:5]
+; GFX9-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %arg_mag_gep = getelementptr float, ptr addrspace(1) %arg_mag, i32 %tid
   %mag = load float, ptr addrspace(1) %arg_mag_gep
@@ -112,21 +864,74 @@ entry:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_out_f64_mag_f64_sign_f16:
-; GCN-DAG: {{buffer|flat|global}}_load_dwordx2 v[[[MAG_LO:[0-9]+]]:[[MAG_HI:[0-9]+]]]
-; GCN-DAG: {{buffer|flat|global}}_load_ushort v[[SIGN:[0-9]+]]
-; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
-; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
-; SI: v_bfi_b32 v[[OUT_HI:[0-9]+]], s[[CONST]], v[[MAG_HI]], v[[SIGN_F32]]
-; GFX89-DAG: v_lshlrev_b32_e32 v[[SIGN_SHIFT:[0-9]+]], 16, v[[SIGN]]
-; GFX89: v_bfi_b32 v[[OUT_HI:[0-9]+]], s[[CONST]], v[[MAG_HI]], v[[SIGN_SHIFT]]
-; GCN: buffer_store_dwordx2 v[[[MAG_LO]]:[[OUT_HI]]]
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_out_f64_mag_f64_sign_f16(
-  ptr addrspace(1) %arg_out,
-  ptr addrspace(1) %arg_mag,
-  ptr addrspace(1) %arg_sign) {
-entry:
+define amdgpu_kernel void @v_copysign_out_f64_mag_f64_sign_f16(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
+; SI-LABEL: v_copysign_out_f64_mag_f64_sign_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s14, 0
+; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v2, 0
+; SI-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[12:15], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[14:15]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    buffer_load_ushort v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s8, s4
+; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT:    v_bfi_b32 v4, s0, v4, v0
+; SI-NEXT:    buffer_store_dwordx2 v[3:4], off, s[8:11], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_copysign_out_f64_mag_f64_sign_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_ushort v4, v[0:1]
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_brev_b32 s0, -2
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    s_waitcnt vmcnt(1)
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_bfi_b32 v1, s0, v1, v4
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: v_copysign_out_f64_mag_f64_sign_f16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX9-NEXT:    s_brev_b32 s0, -2
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_ushort v2, v1, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, 0
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v0, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v1, s0, v1, v2
+; GFX9-NEXT:    global_store_dwordx2 v3, v[0:1], s[4:5]
+; GFX9-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %arg_mag_gep = getelementptr double, ptr addrspace(1) %arg_mag, i32 %tid
   %mag = load double, ptr addrspace(1) %arg_mag_gep
@@ -138,23 +943,76 @@ entry:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f16_sign_f32:
-; GCN-DAG: {{buffer|flat|global}}_load_ushort v[[MAG:[0-9]+]]
-; GCN-DAG: {{buffer|flat|global}}_load_dword v[[SIGN:[0-9]+]]
-; SI-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
-; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG]]
-; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN]]
-; SI: v_cvt_f16_f32_e32 v[[OUT:[0-9]+]], v[[OUT_F32]]
-; GFX89-DAG: s_movk_i32 s[[CONST:[0-9]+]], 0x7fff
-; GFX89-DAG: v_lshrrev_b32_e32 v[[SIGN_SHIFT:[0-9]+]], 16, v[[SIGN]]
-; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN_SHIFT]]
-; GCN: buffer_store_short v[[OUT]]
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_out_f16_mag_f16_sign_f32(
-  ptr addrspace(1) %arg_out,
-  ptr addrspace(1) %arg_mag,
-  ptr addrspace(1) %arg_sign) {
-entry:
+define amdgpu_kernel void @v_copysign_out_f16_mag_f16_sign_f32(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
+; SI-LABEL: v_copysign_out_f16_mag_f16_sign_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s14, 0
+; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    v_mov_b32_e32 v2, 0
+; SI-NEXT:    buffer_load_ushort v3, v[1:2], s[12:15], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[14:15]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    buffer_load_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s8, s4
+; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_bfi_b32 v0, s0, v1, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    buffer_store_short v0, off, s[8:11], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_copysign_out_f16_mag_f16_sign_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_ushort v2, v[0:1]
+; VI-NEXT:    s_movk_i32 s0, 0x7fff
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_waitcnt vmcnt(1)
+; VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_bfi_b32 v2, s0, v2, v3
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: v_copysign_out_f16_mag_f16_sign_f32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v1, v1, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_ushort v0, v0, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v0, s0, v0, v1
+; GFX9-NEXT:    global_store_short v2, v0, s[4:5]
+; GFX9-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %arg_mag_gep = getelementptr half, ptr addrspace(1) %arg_mag, i32 %tid
   %mag = load half, ptr addrspace(1) %arg_mag_gep
@@ -166,23 +1024,73 @@ entry:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f16_sign_f64:
-; GCN-DAG: {{buffer|flat|global}}_load_ushort v[[MAG:[0-9]+]]
-; GCN-DAG: {{buffer|flat|global}}_load_dwordx2 v[[[SIGN_LO:[0-9]+]]:[[SIGN_HI:[0-9]+]]]
-; SI-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
-; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG]]
-; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN_HI]]
-; SI: v_cvt_f16_f32_e32 v[[OUT:[0-9]+]], v[[OUT_F32]]
-; GFX89-DAG: s_movk_i32 s[[CONST:[0-9]+]], 0x7fff
-; GFX89-DAG: v_lshrrev_b32_e32 v[[SIGN_SHIFT:[0-9]+]], 16, v[[SIGN_HI]]
-; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN_SHIFT]]
-; GCN: buffer_store_short v[[OUT]]
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_out_f16_mag_f16_sign_f64(
-  ptr addrspace(1) %arg_out,
-  ptr addrspace(1) %arg_mag,
-  ptr addrspace(1) %arg_sign) {
-entry:
+define amdgpu_kernel void @v_copysign_out_f16_mag_f16_sign_f64(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
+; SI-LABEL: v_copysign_out_f16_mag_f16_sign_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b32 s14, s2
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s12, s6
+; SI-NEXT:    s_mov_b32 s13, s7
+; SI-NEXT:    s_mov_b32 s15, s3
+; SI-NEXT:    buffer_load_ushort v2, off, s[12:15], 0
+; SI-NEXT:    s_mov_b32 s10, 0
+; SI-NEXT:    s_mov_b32 s11, s3
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[0:1], s[8:11], 0 addr64
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v2
+; SI-NEXT:    v_bfi_b32 v0, s0, v0, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_copysign_out_f16_mag_f16_sign_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s0, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    s_movk_i32 s0, 0x7fff
+; VI-NEXT:    flat_load_ushort v3, v[0:1]
+; VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_bfi_b32 v2, s0, v3, v2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: v_copysign_out_f16_mag_f16_sign_f64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v0, s[2:3]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    global_load_ushort v2, v0, s[6:7]
+; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v1, s0, v2, v1
+; GFX9-NEXT:    global_store_short v0, v1, s[4:5]
+; GFX9-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %arg_mag_gep = getelementptr half, ptr addrspace(1) %arg_mag, i32 %tid
   %mag = load half, ptr addrspace(1) %arg_mag
@@ -194,25 +1102,78 @@ entry:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f32_sign_f16:
-; GCN-DAG: {{buffer|flat|global}}_load_dword v[[MAG:[0-9]+]]
-; GCN-DAG: {{buffer|flat|global}}_load_ushort v[[SIGN:[0-9]+]]
-; SI-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
-; SI-DAG: v_cvt_f16_f32_e32 v[[MAG_TRUNC:[0-9]+]], v[[MAG]]
-; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
-; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG_TRUNC]]
-; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN_F32]]
-; SI: v_cvt_f16_f32_e32 v[[OUT:[0-9]+]], v[[OUT_F32]]
-; GFX89-DAG: s_movk_i32 s[[CONST:[0-9]+]], 0x7fff
-; GFX89-DAG: v_cvt_f16_f32_e32 v[[MAG_TRUNC:[0-9]+]], v[[MAG]]
-; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG_TRUNC]], v[[SIGN]]
-; GCN: buffer_store_short v[[OUT]]
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_out_f16_mag_f32_sign_f16(
-  ptr addrspace(1) %arg_out,
-  ptr addrspace(1) %arg_mag,
-  ptr addrspace(1) %arg_sign) {
-entry:
+define amdgpu_kernel void @v_copysign_out_f16_mag_f32_sign_f16(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
+; SI-LABEL: v_copysign_out_f16_mag_f32_sign_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s14, 0
+; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v2, 0
+; SI-NEXT:    buffer_load_dword v3, v[1:2], s[12:15], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[14:15]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    buffer_load_ushort v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s8, s4
+; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    v_bfi_b32 v0, s0, v1, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    buffer_store_short v0, off, s[8:11], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_copysign_out_f16_mag_f32_sign_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s7
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s6, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v2, v[1:2]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_ushort v3, v[0:1]
+; VI-NEXT:    s_movk_i32 s0, 0x7fff
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_waitcnt vmcnt(1)
+; VI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_bfi_b32 v2, s0, v2, v3
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: v_copysign_out_f16_mag_f32_sign_f16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v1, v1, s[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_ushort v0, v0, s[2:3]
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v0, s0, v1, v0
+; GFX9-NEXT:    global_store_short v2, v0, s[4:5]
+; GFX9-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %arg_mag_gep = getelementptr float, ptr addrspace(1) %arg_mag, i32 %tid
   %mag = load float, ptr addrspace(1) %arg_mag_gep
@@ -224,64 +1185,450 @@ entry:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f64_sign_f16:
-; GCN: v_bfi_b32
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_out_f16_mag_f64_sign_f16(
-  ptr addrspace(1) %arg_out,
-  ptr addrspace(1) %arg_mag,
-  ptr addrspace(1) %arg_sign) {
-entry:
-  %mag = load double, ptr addrspace(1) %arg_mag
+define amdgpu_kernel void @s_copysign_out_f16_mag_f64_sign_f16(ptr addrspace(1) %arg_out, double %mag, half %sign) {
+; SI-LABEL: s_copysign_out_f16_mag_f64_sign_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s4
+; SI-NEXT:    s_lshr_b32 s4, s3, 8
+; SI-NEXT:    s_and_b32 s5, s3, 0x1ff
+; SI-NEXT:    s_and_b32 s6, s4, 0xffe
+; SI-NEXT:    s_or_b32 s2, s5, s2
+; SI-NEXT:    s_cmp_lg_u32 s2, 0
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; SI-NEXT:    v_readfirstlane_b32 s2, v1
+; SI-NEXT:    s_bfe_u32 s5, s3, 0xb0014
+; SI-NEXT:    s_or_b32 s2, s6, s2
+; SI-NEXT:    s_sub_i32 s6, 0x3f1, s5
+; SI-NEXT:    v_med3_i32 v1, s6, 0, 13
+; SI-NEXT:    s_or_b32 s4, s2, 0x1000
+; SI-NEXT:    v_readfirstlane_b32 s6, v1
+; SI-NEXT:    s_lshr_b32 s6, s4, s6
+; SI-NEXT:    v_lshl_b32_e32 v1, s6, v1
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, s4, v1
+; SI-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; SI-NEXT:    s_add_i32 s8, s5, 0xfffffc10
+; SI-NEXT:    v_readfirstlane_b32 s4, v1
+; SI-NEXT:    s_lshl_b32 s5, s8, 12
+; SI-NEXT:    s_or_b32 s4, s6, s4
+; SI-NEXT:    s_or_b32 s5, s2, s5
+; SI-NEXT:    s_cmp_lt_i32 s8, 1
+; SI-NEXT:    s_cselect_b32 s9, s4, s5
+; SI-NEXT:    s_and_b32 s6, s9, 7
+; SI-NEXT:    s_cmp_gt_i32 s6, 5
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s6, 3
+; SI-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; SI-NEXT:    s_or_b64 s[4:5], s[6:7], s[4:5]
+; SI-NEXT:    s_lshr_b32 s6, s9, 2
+; SI-NEXT:    s_or_b32 s4, s4, s5
+; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    s_addc_u32 s4, s6, 0
+; SI-NEXT:    s_cmp_lt_i32 s8, 31
+; SI-NEXT:    s_cselect_b32 s6, s4, 0x7c00
+; SI-NEXT:    s_cmp_lg_u32 s2, 0
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 9, v1
+; SI-NEXT:    s_cmpk_eq_i32 s8, 0x40f
+; SI-NEXT:    v_or_b32_e32 v1, 0x7c00, v1
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_lshr_b32 s2, s3, 16
+; SI-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; SI-NEXT:    s_and_b32 s2, s2, 0x8000
+; SI-NEXT:    v_or_b32_e32 v1, s2, v1
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    v_bfi_b32 v0, s2, v1, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_copysign_out_f16_mag_f64_sign_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dword s8, s[0:1], 0x34
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s0, s7, 8
+; VI-NEXT:    s_and_b32 s1, s7, 0x1ff
+; VI-NEXT:    s_and_b32 s2, s0, 0xffe
+; VI-NEXT:    s_or_b32 s0, s1, s6
+; VI-NEXT:    s_cmp_lg_u32 s0, 0
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; VI-NEXT:    v_readfirstlane_b32 s0, v2
+; VI-NEXT:    s_bfe_u32 s1, s7, 0xb0014
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    s_or_b32 s4, s2, s0
+; VI-NEXT:    s_sub_i32 s2, 0x3f1, s1
+; VI-NEXT:    v_med3_i32 v2, s2, 0, 13
+; VI-NEXT:    s_or_b32 s0, s4, 0x1000
+; VI-NEXT:    v_readfirstlane_b32 s2, v2
+; VI-NEXT:    s_lshr_b32 s2, s0, s2
+; VI-NEXT:    v_lshlrev_b32_e64 v2, v2, s2
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; VI-NEXT:    s_add_i32 s5, s1, 0xfffffc10
+; VI-NEXT:    v_readfirstlane_b32 s0, v2
+; VI-NEXT:    s_lshl_b32 s1, s5, 12
+; VI-NEXT:    s_or_b32 s0, s2, s0
+; VI-NEXT:    s_or_b32 s1, s4, s1
+; VI-NEXT:    s_cmp_lt_i32 s5, 1
+; VI-NEXT:    s_cselect_b32 s6, s0, s1
+; VI-NEXT:    s_and_b32 s2, s6, 7
+; VI-NEXT:    s_cmp_gt_i32 s2, 5
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s2, 3
+; VI-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; VI-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; VI-NEXT:    s_lshr_b32 s2, s6, 2
+; VI-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT:    s_addc_u32 s0, s2, 0
+; VI-NEXT:    s_cmp_lt_i32 s5, 31
+; VI-NEXT:    s_cselect_b32 s2, s0, 0x7c00
+; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 9, v2
+; VI-NEXT:    s_cmpk_eq_i32 s5, 0x40f
+; VI-NEXT:    v_or_b32_e32 v2, 0x7c00, v2
+; VI-NEXT:    v_mov_b32_e32 v3, s2
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_lshr_b32 s0, s7, 16
+; VI-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
+; VI-NEXT:    s_and_b32 s0, s0, 0x8000
+; VI-NEXT:    v_or_b32_e32 v2, s0, v2
+; VI-NEXT:    s_movk_i32 s0, 0x7fff
+; VI-NEXT:    v_mov_b32_e32 v3, s8
+; VI-NEXT:    v_bfi_b32 v2, s0, v2, v3
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_copysign_out_f16_mag_f64_sign_f16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshr_b32 s0, s7, 8
+; GFX9-NEXT:    s_and_b32 s1, s7, 0x1ff
+; GFX9-NEXT:    s_and_b32 s2, s0, 0xffe
+; GFX9-NEXT:    s_or_b32 s0, s1, s6
+; GFX9-NEXT:    s_cmp_lg_u32 s0, 0
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v1
+; GFX9-NEXT:    s_bfe_u32 s1, s7, 0xb0014
+; GFX9-NEXT:    s_or_b32 s6, s2, s0
+; GFX9-NEXT:    s_sub_i32 s2, 0x3f1, s1
+; GFX9-NEXT:    v_med3_i32 v1, s2, 0, 13
+; GFX9-NEXT:    s_or_b32 s0, s6, 0x1000
+; GFX9-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX9-NEXT:    s_lshr_b32 s2, s0, s2
+; GFX9-NEXT:    v_lshlrev_b32_e64 v1, v1, s2
+; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, s0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    s_add_i32 s9, s1, 0xfffffc10
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v1
+; GFX9-NEXT:    s_lshl_b32 s1, s9, 12
+; GFX9-NEXT:    s_or_b32 s0, s2, s0
+; GFX9-NEXT:    s_or_b32 s1, s6, s1
+; GFX9-NEXT:    s_cmp_lt_i32 s9, 1
+; GFX9-NEXT:    s_cselect_b32 s10, s0, s1
+; GFX9-NEXT:    s_and_b32 s2, s10, 7
+; GFX9-NEXT:    s_cmp_gt_i32 s2, 5
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 3
+; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GFX9-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX9-NEXT:    s_lshr_b32 s2, s10, 2
+; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT:    s_addc_u32 s0, s2, 0
+; GFX9-NEXT:    s_cmp_lt_i32 s9, 31
+; GFX9-NEXT:    s_cselect_b32 s2, s0, 0x7c00
+; GFX9-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 9, v1
+; GFX9-NEXT:    s_cmpk_eq_i32 s9, 0x40f
+; GFX9-NEXT:    v_or_b32_e32 v1, 0x7c00, v1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX9-NEXT:    s_lshr_b32 s0, s7, 16
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT:    s_and_b32 s0, s0, 0x8000
+; GFX9-NEXT:    v_or_b32_e32 v1, s0, v1
+; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
+; GFX9-NEXT:    v_mov_b32_e32 v2, s8
+; GFX9-NEXT:    v_bfi_b32 v1, s0, v1, v2
+; GFX9-NEXT:    global_store_short v0, v1, s[4:5]
+; GFX9-NEXT:    s_endpgm
   %mag.trunc = fptrunc double %mag to half
-  %sign = load half, ptr addrspace(1) %arg_sign
-  %out = call half @llvm.copysign.f16(half %mag.trunc, half %sign)
-  store half %out, ptr addrspace(1) %arg_out
+  %result = call half @llvm.copysign.f16(half %mag.trunc, half %sign)
+  store half %result, ptr addrspace(1) %arg_out
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_v2f16:
-; GCN: v_bfi_b32
-; GCN: v_bfi_b32
-; VI: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_v2f16(
-  ptr addrspace(1) %arg_out,
-  <2 x half> %arg_mag,
-  <2 x half> %arg_sign) {
-entry:
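+; Each element gets its own v_bfi_b32; SI has no 16-bit instructions, so the
+; halves round-trip through f32 before being repacked.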
+define amdgpu_kernel void @s_copysign_v2f16(ptr addrspace(1) %arg_out, <2 x half> %arg_mag, <2 x half> %arg_sign) {
+; SI-LABEL: s_copysign_v2f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s4, s2, 16
+; SI-NEXT:    s_lshr_b32 s5, s3, 16
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s4
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, s5
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, s2
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, s3
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_bfi_b32 v0, s2, v0, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    v_bfi_b32 v1, s2, v2, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_or_b32_e32 v0, v1, v0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_copysign_v2f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_movk_i32 s4, 0x7fff
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    s_lshr_b32 s3, s3, 16
+; VI-NEXT:    s_lshr_b32 s2, s2, 16
+; VI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_bfi_b32 v1, s4, v1, v2
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; VI-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_copysign_v2f16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_mov_b32_e32 v2, s3
+; GFX9-NEXT:    s_lshr_b32 s3, s3, 16
+; GFX9-NEXT:    s_lshr_b32 s2, s2, 16
+; GFX9-NEXT:    v_bfi_b32 v1, s4, v1, v2
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
+; GFX9-NEXT:    v_bfi_b32 v2, s4, v2, v3
+; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX9-NEXT:    v_lshl_or_b32 v1, v2, 16, v1
+; GFX9-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT:    s_endpgm
   %out = call <2 x half> @llvm.copysign.v2f16(<2 x half> %arg_mag, <2 x half> %arg_sign)
   store <2 x half> %out, ptr addrspace(1) %arg_out
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_v3f16:
-; GCN: v_bfi_b32
-; GCN: v_bfi_b32
-; GCN: v_bfi_b32
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_v3f16(
-  ptr addrspace(1) %arg_out,
-  <3 x half> %arg_mag,
-  <3 x half> %arg_sign) {
-entry:
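+; The odd third element is stored separately: a dword store for the low pair
+; plus a short store at offset 4.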
+define amdgpu_kernel void @s_copysign_v3f16(ptr addrspace(1) %arg_out, <3 x half> %arg_mag, <3 x half> %arg_sign) {
+; SI-LABEL: s_copysign_v3f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s2, s4, 16
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, s2
+; SI-NEXT:    s_lshr_b32 s2, s6, 16
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, s2
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s5
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, s4
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, s7
+; SI-NEXT:    v_cvt_f32_f16_e32 v5, s6
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_bfi_b32 v2, s2, v2, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT:    v_bfi_b32 v1, s2, v1, v5
+; SI-NEXT:    v_bfi_b32 v0, s2, v0, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0 offset:4
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_copysign_v3f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_movk_i32 s2, 0x7fff
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s6
+; VI-NEXT:    s_lshr_b32 s3, s6, 16
+; VI-NEXT:    s_lshr_b32 s4, s4, 16
+; VI-NEXT:    v_bfi_b32 v0, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v1, s4
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_bfi_b32 v1, s2, v1, v2
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; VI-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_bfi_b32 v3, s2, v0, v1
+; VI-NEXT:    s_add_u32 s2, s0, 4
+; VI-NEXT:    s_addc_u32 s3, s1, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    flat_store_short v[0:1], v3
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_copysign_v3f16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9-NEXT:    s_lshr_b32 s1, s6, 16
+; GFX9-NEXT:    s_lshr_b32 s4, s4, 16
+; GFX9-NEXT:    v_bfi_b32 v1, s0, v1, v2
+; GFX9-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-NEXT:    v_mov_b32_e32 v3, s1
+; GFX9-NEXT:    v_bfi_b32 v2, s0, v2, v3
+; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX9-NEXT:    v_lshl_or_b32 v1, v2, 16, v1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s5
+; GFX9-NEXT:    v_mov_b32_e32 v3, s7
+; GFX9-NEXT:    v_bfi_b32 v2, s0, v2, v3
+; GFX9-NEXT:    global_store_short v0, v2, s[2:3] offset:4
+; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
+; GFX9-NEXT:    s_endpgm
   %out = call <3 x half> @llvm.copysign.v3f16(<3 x half> %arg_mag, <3 x half> %arg_sign)
   store <3 x half> %out, ptr addrspace(1) %arg_out
   ret void
 }
 
-; GCN-LABEL: {{^}}test_copysign_v4f16:
-; GCN: v_bfi_b32
-; GCN: v_bfi_b32
-; GCN: v_bfi_b32
-; GCN: v_bfi_b32
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_v4f16(
-  ptr addrspace(1) %arg_out,
-  <4 x half> %arg_mag,
-  <4 x half> %arg_sign) {
-entry:
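+; Two packed pairs of per-element v_bfi_b32 results are recombined into a
+; single 64-bit store.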
+define amdgpu_kernel void @s_copysign_v4f16(ptr addrspace(1) %arg_out, <4 x half> %arg_mag, <4 x half> %arg_sign) {
+; SI-LABEL: s_copysign_v4f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s8, s4, 16
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT:    s_lshr_b32 s4, s6, 16
+; SI-NEXT:    s_lshr_b32 s9, s5, 16
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT:    s_lshr_b32 s4, s7, 16
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s8
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, s9
+; SI-NEXT:    v_cvt_f32_f16_e32 v5, s4
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, s5
+; SI-NEXT:    v_cvt_f32_f16_e32 v6, s6
+; SI-NEXT:    v_cvt_f32_f16_e32 v7, s7
+; SI-NEXT:    s_brev_b32 s4, -2
+; SI-NEXT:    v_bfi_b32 v1, s4, v1, v5
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    v_bfi_b32 v3, s4, v3, v7
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    v_bfi_b32 v2, s4, v2, v6
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_or_b32_e32 v1, v3, v1
+; SI-NEXT:    v_or_b32_e32 v0, v2, v0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_copysign_v4f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_movk_i32 s2, 0x7fff
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    s_lshr_b32 s3, s7, 16
+; VI-NEXT:    s_lshr_b32 s5, s5, 16
+; VI-NEXT:    v_bfi_b32 v0, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_bfi_b32 v1, s2, v1, v2
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; VI-NEXT:    v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_lshr_b32 s3, s6, 16
+; VI-NEXT:    s_lshr_b32 s4, s4, 16
+; VI-NEXT:    v_bfi_b32 v0, s2, v0, v2
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    v_bfi_b32 v2, s2, v2, v3
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: s_copysign_v4f16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-NEXT:    s_lshr_b32 s1, s7, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s5, 16
+; GFX9-NEXT:    v_bfi_b32 v0, s0, v0, v1
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    v_mov_b32_e32 v3, s1
+; GFX9-NEXT:    v_bfi_b32 v1, s0, v1, v3
+; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT:    v_lshl_or_b32 v1, v1, 16, v0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v3, s6
+; GFX9-NEXT:    s_lshr_b32 s1, s6, 16
+; GFX9-NEXT:    s_lshr_b32 s4, s4, 16
+; GFX9-NEXT:    v_bfi_b32 v0, s0, v0, v3
+; GFX9-NEXT:    v_mov_b32_e32 v3, s4
+; GFX9-NEXT:    v_mov_b32_e32 v4, s1
+; GFX9-NEXT:    v_bfi_b32 v3, s0, v3, v4
+; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT:    v_lshl_or_b32 v0, v3, 16, v0
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT:    s_endpgm
   %out = call <4 x half> @llvm.copysign.v4f16(<4 x half> %arg_mag, <4 x half> %arg_sign)
   store <4 x half> %out, ptr addrspace(1) %arg_out
   ret void
 }
+
+attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }

diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f32.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f32.ll
index e02230d4e421..712616dc5910 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f32.ll
@@ -1,51 +1,787 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-
-declare float @llvm.copysign.f32(float, float) nounwind readnone
-declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>) nounwind readnone
-declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) nounwind readnone
-
-; Try to identify arg based on higher address.
-; FUNC-LABEL: {{^}}test_copysign_f32:
-; SI: s_load_dwordx4 s[[[#LOAD:]]:[[#END:]]], {{.*}} 0x9
-; VI: s_load_dwordx4 s[[[#LOAD:]]:[[#END:]]], {{.*}} 0x24
-
-; GCN-DAG: v_mov_b32_e32 [[VSIGN:v[0-9]+]], s[[#LOAD + 3]]
-; GCN-DAG: v_mov_b32_e32 [[VMAG:v[0-9]+]], s[[#LOAD + 2]]
-; GCN-DAG: s_brev_b32 [[SCONST:s[0-9]+]], -2
-; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[SCONST]], [[VMAG]], [[VSIGN]]
-; GCN: buffer_store_dword [[RESULT]],
-; GCN: s_endpgm
-
-; EG: BFI_INT
-define amdgpu_kernel void @test_copysign_f32(ptr addrspace(1) %out, float %mag, float %sign) nounwind {
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=GCN,VI %s
+
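+; The basic case lowers to a single v_bfi_b32 selecting the magnitude bits
+; under mask 0x7fffffff.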
+define amdgpu_kernel void @s_test_copysign_f32(ptr addrspace(1) %out, float %mag, float %sign) {
+; SI-LABEL: s_test_copysign_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_brev_b32 s8, -2
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_bfi_b32 v0, s8, v0, v1
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s4, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_bfi_b32 v2, s4, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %result = call float @llvm.copysign.f32(float %mag, float %sign)
   store float %result, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; FUNC-LABEL: {{^}}test_copysign_v2f32:
-; GCN: s_endpgm
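+; A positive constant sign folds the copysign to clearing the sign bit
+; (s_bitset0_b32 ... 31, i.e. fabs).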
+define amdgpu_kernel void @s_test_copysign_f32_0(ptr addrspace(1) %out, float %mag) {
+; SI-LABEL: s_test_copysign_f32_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset0_b32 s4, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset0_b32 s2, 31
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float %mag, float 0.0)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f32_1(ptr addrspace(1) %out, float %mag) {
+; SI-LABEL: s_test_copysign_f32_1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset0_b32 s4, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset0_b32 s2, 31
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float %mag, float 1.0)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f32_10.0(ptr addrspace(1) %out, float %mag) {
+; SI-LABEL: s_test_copysign_f32_10.0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset0_b32 s4, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_10.0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset0_b32 s2, 31
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float %mag, float 10.0)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
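+; A negative constant sign folds to setting the sign bit instead
+; (s_bitset1_b32).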
+define amdgpu_kernel void @s_test_copysign_f32_neg1(ptr addrspace(1) %out, float %mag) {
+; SI-LABEL: s_test_copysign_f32_neg1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset1_b32 s4, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_neg1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset1_b32 s2, 31
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float %mag, float -1.0)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f32_neg10(ptr addrspace(1) %out, float %mag) {
+; SI-LABEL: s_test_copysign_f32_neg10:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset1_b32 s4, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_neg10:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset1_b32 s2, 31
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float %mag, float -10.0)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
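+; With a constant +0.0 magnitude, only the sign bit of %sign survives
+; (s_and_b32 with 0x80000000).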
+define amdgpu_kernel void @s_test_copysign_f32_0_mag(ptr addrspace(1) %out, float %sign) {
+; SI-LABEL: s_test_copysign_f32_0_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0x80000000
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_0_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s2, s2, 0x80000000
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float 0.0, float %sign)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
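+; Constant nonzero magnitudes become a sign-bit extract ORed with the
+; magnitude bits.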
+define amdgpu_kernel void @s_test_copysign_f32_1_mag(ptr addrspace(1) %out, float %sign) {
+; SI-LABEL: s_test_copysign_f32_1_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0x80000000
+; SI-NEXT:    s_or_b32 s4, s4, 1.0
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_1_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s2, s2, 0x80000000
+; VI-NEXT:    s_or_b32 s2, s2, 1.0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float 1.0, float %sign)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f32_10_mag(ptr addrspace(1) %out, float %sign) {
+; SI-LABEL: s_test_copysign_f32_10_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0x80000000
+; SI-NEXT:    s_or_b32 s4, s4, 0x41200000
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_10_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s2, s2, 0x80000000
+; VI-NEXT:    s_or_b32 s2, s2, 0x41200000
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float 10.0, float %sign)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
 
-; EG: BFI_INT
-; EG: BFI_INT
-define amdgpu_kernel void @test_copysign_v2f32(ptr addrspace(1) %out, <2 x float> %mag, <2 x float> %sign) nounwind {
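+; Negative constant magnitudes emit the same code as their absolute values;
+; copysign ignores the magnitude's own sign.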
+define amdgpu_kernel void @s_test_copysign_f32_neg1_mag(ptr addrspace(1) %out, float %sign) {
+; SI-LABEL: s_test_copysign_f32_neg1_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0x80000000
+; SI-NEXT:    s_or_b32 s4, s4, 1.0
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_neg1_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s2, s2, 0x80000000
+; VI-NEXT:    s_or_b32 s2, s2, 1.0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float -1.0, float %sign)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f32_neg10_mag(ptr addrspace(1) %out, float %sign) {
+; SI-LABEL: s_test_copysign_f32_neg10_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0x80000000
+; SI-NEXT:    s_or_b32 s4, s4, 0x41200000
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_neg10_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s2, s2, 0x80000000
+; VI-NEXT:    s_or_b32 s2, s2, 0x41200000
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %result = call float @llvm.copysign.f32(float -10.0, float %sign)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
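+; Vector f32 cases repeat the v_bfi_b32 pattern once per element.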
+define amdgpu_kernel void @s_test_copysign_v2f32(ptr addrspace(1) %out, <2 x float> %mag, <2 x float> %sign) {
+; SI-LABEL: s_test_copysign_v2f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_brev_b32 s8, -2
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    v_bfi_b32 v1, s8, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_bfi_b32 v0, s8, v0, v2
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_v2f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_bfi_b32 v0, s2, v2, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> %sign)
   store <2 x float> %result, ptr addrspace(1) %out, align 8
   ret void
 }
 
-; FUNC-LABEL: {{^}}test_copysign_v4f32:
-; GCN: s_endpgm
+define amdgpu_kernel void @s_test_copysign_v3f32(ptr addrspace(1) %out, <3 x float> %mag, <3 x float> %sign) {
+; SI-LABEL: s_test_copysign_v3f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s7, -2
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_bfi_b32 v1, s7, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v2, s8
+; SI-NEXT:    v_bfi_b32 v0, s7, v0, v2
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s10
+; SI-NEXT:    v_bfi_b32 v2, s7, v2, v3
+; SI-NEXT:    buffer_store_dword v2, off, s[0:3], 0 offset:8
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_v3f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s10
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_bfi_b32 v2, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_bfi_b32 v1, s2, v3, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s8
+; VI-NEXT:    v_bfi_b32 v0, s2, v0, v3
+; VI-NEXT:    v_mov_b32_e32 v4, s1
+; VI-NEXT:    v_mov_b32_e32 v3, s0
+; VI-NEXT:    flat_store_dwordx3 v[3:4], v[0:2]
+; VI-NEXT:    s_endpgm
+  %result = call <3 x float> @llvm.copysign.v3f32(<3 x float> %mag, <3 x float> %sign)
+  store <3 x float> %result, ptr addrspace(1) %out, align 16
+  ret void
+}
 
-; EG: BFI_INT
-; EG: BFI_INT
-; EG: BFI_INT
-; EG: BFI_INT
-define amdgpu_kernel void @test_copysign_v4f32(ptr addrspace(1) %out, <4 x float> %mag, <4 x float> %sign) nounwind {
+define amdgpu_kernel void @s_test_copysign_v4f32(ptr addrspace(1) %out, <4 x float> %mag, <4 x float> %sign) {
+; SI-LABEL: s_test_copysign_v4f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
+; SI-NEXT:    s_brev_b32 s12, -2
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_bfi_b32 v3, s12, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s10
+; SI-NEXT:    v_bfi_b32 v2, s12, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_bfi_b32 v1, s12, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v4, s8
+; SI-NEXT:    v_bfi_b32 v0, s12, v0, v4
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_v4f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_bfi_b32 v3, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_bfi_b32 v2, s2, v2, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    v_bfi_b32 v0, s2, v0, v4
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; VI-NEXT:    s_endpgm
   %result = call <4 x float> @llvm.copysign.v4f32(<4 x float> %mag, <4 x float> %sign)
   store <4 x float> %result, ptr addrspace(1) %out, align 16
   ret void
 }
 
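+; VGPR variants: the mask still comes from an SGPR, and constant signs fold
+; to a single v_and/v_or on the magnitude.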
+define float @v_test_copysign_f32(float %mag, float %sign) {
+; GCN-LABEL: v_test_copysign_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call float @llvm.copysign.f32(float %mag, float %sign)
+  ret float %result
+}
+
+define float @v_test_copysign_f32_0(float %mag) {
+; GCN-LABEL: v_test_copysign_f32_0:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call float @llvm.copysign.f32(float %mag, float 0.0)
+  ret float %result
+}
+
+define float @v_test_copysign_f32_1(float %mag) {
+; GCN-LABEL: v_test_copysign_f32_1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call float @llvm.copysign.f32(float %mag, float 1.0)
+  ret float %result
+}
+
+define float @v_test_copysign_f32_10(float %mag) {
+; GCN-LABEL: v_test_copysign_f32_10:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call float @llvm.copysign.f32(float %mag, float 10.0)
+  ret float %result
+}
+
+define float @v_test_copysign_f32_neg1(float %mag) {
+; GCN-LABEL: v_test_copysign_f32_neg1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call float @llvm.copysign.f32(float %mag, float -1.0)
+  ret float %result
+}
+
+define float @v_test_copysign_f32_neg10(float %mag) {
+; GCN-LABEL: v_test_copysign_f32_neg10:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call float @llvm.copysign.f32(float %mag, float -10.0)
+  ret float %result
+}
+
+define <2 x float> @v_test_copysign_v2f32(<2 x float> %mag, <2 x float> %sign) {
+; GCN-LABEL: v_test_copysign_v2f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_bfi_b32 v0, s4, v0, v2
+; GCN-NEXT:    v_bfi_b32 v1, s4, v1, v3
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> %sign)
+  ret <2 x float> %result
+}
+
+define <2 x float> @v_test_copysign_v2f32_0(<2 x float> %mag) {
+; GCN-LABEL: v_test_copysign_v2f32_0:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> zeroinitializer)
+  ret <2 x float> %result
+}
+
+define <2 x float> @v_test_copysign_v2f32_neg1(<2 x float> %mag) {
+; GCN-LABEL: v_test_copysign_v2f32_neg1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    v_or_b32_e32 v1, 0x80000000, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> <float -1.0, float -1.0>)
+  ret <2 x float> %result
+}
+
+define <3 x float> @v_test_copysign_v3f32(<3 x float> %mag, <3 x float> %sign) {
+; GCN-LABEL: v_test_copysign_v3f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_bfi_b32 v0, s4, v0, v3
+; GCN-NEXT:    v_bfi_b32 v1, s4, v1, v4
+; GCN-NEXT:    v_bfi_b32 v2, s4, v2, v5
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call <3 x float> @llvm.copysign.v3f32(<3 x float> %mag, <3 x float> %sign)
+  ret <3 x float> %result
+}
+
+define <4 x float> @v_test_copysign_v4f32(<4 x float> %mag, <4 x float> %sign) {
+; GCN-LABEL: v_test_copysign_v4f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_bfi_b32 v0, s4, v0, v4
+; GCN-NEXT:    v_bfi_b32 v1, s4, v1, v5
+; GCN-NEXT:    v_bfi_b32 v2, s4, v2, v6
+; GCN-NEXT:    v_bfi_b32 v3, s4, v3, v7
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call <4 x float> @llvm.copysign.v4f32(<4 x float> %mag, <4 x float> %sign)
+  ret <4 x float> %result
+}
+
+define <5 x float> @v_test_copysign_v5f32(<5 x float> %mag, <5 x float> %sign) {
+; GCN-LABEL: v_test_copysign_v5f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_bfi_b32 v0, s4, v0, v5
+; GCN-NEXT:    v_bfi_b32 v1, s4, v1, v6
+; GCN-NEXT:    v_bfi_b32 v2, s4, v2, v7
+; GCN-NEXT:    v_bfi_b32 v3, s4, v3, v8
+; GCN-NEXT:    v_bfi_b32 v4, s4, v4, v9
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call <5 x float> @llvm.copysign.v5f32(<5 x float> %mag, <5 x float> %sign)
+  ret <5 x float> %result
+}
+
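+; The fptrunc of the f64 sign folds away; the sign bit is read straight from
+; the double's high dword.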
+define amdgpu_kernel void @s_test_copysign_f32_fptrunc_f64(ptr addrspace(1) %out, float %mag, double %sign) {
+; SI-LABEL: s_test_copysign_f32_fptrunc_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_bfi_b32 v0, s0, v0, v1
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_fptrunc_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_bfi_b32 v2, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %sign.trunc = fptrunc double %sign to float
+  %result = call float @llvm.copysign.f32(float %mag, float %sign.trunc)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f32_1_fptrunc_f64(ptr addrspace(1) %out, double %sign) {
+; SI-LABEL: s_test_copysign_f32_1_fptrunc_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; SI-NEXT:    s_or_b32 s0, s0, 1.0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_1_fptrunc_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; VI-NEXT:    s_or_b32 s0, s0, 1.0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %sign.trunc = fptrunc double %sign to float
+  %result = call float @llvm.copysign.f32(float 1.0, float %sign.trunc)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
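+; For an f16 sign, SI extends through v_cvt_f32_f16 while VI just shifts the
+; half's bits up 16 to align the sign bit.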
+define amdgpu_kernel void @s_test_copysign_f32_fpext_f16(ptr addrspace(1) %out, float %mag, half %sign) {
+; SI-LABEL: s_test_copysign_f32_fpext_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s3
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    v_mov_b32_e32 v1, s2
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_bfi_b32 v0, s0, v1, v0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_fpext_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s4, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e64 v0, 16, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_bfi_b32 v2, s4, v1, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %sign.ext = fpext half %sign to float
+  %result = call float @llvm.copysign.f32(float %mag, float %sign.ext)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f32_1_fpext_f16(ptr addrspace(1) %out, half %sign) {
+; SI-LABEL: s_test_copysign_f32_1_fpext_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_and_b32_e32 v0, 0x80000000, v0
+; SI-NEXT:    v_or_b32_e32 v0, 1.0, v0
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_1_fpext_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b32 s2, s2, 16
+; VI-NEXT:    s_and_b32 s2, s2, 0x80000000
+; VI-NEXT:    s_or_b32 s2, s2, 1.0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %sign.ext = fpext half %sign to float
+  %result = call float @llvm.copysign.f32(float 1.0, float %sign.ext)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
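+; A bfloat sign needs no conversion: its bits are the high half of an f32,
+; so a 16-bit shift lines up the sign bit.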
+define amdgpu_kernel void @s_test_copysign_f32_fpext_bf16(ptr addrspace(1) %out, float %mag, bfloat %sign) {
+; SI-LABEL: s_test_copysign_f32_fpext_bf16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_lshl_b32 s0, s3, 16
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    s_brev_b32 s1, -2
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_mov_b32_e32 v1, s0
+; SI-NEXT:    v_bfi_b32 v0, s1, v0, v1
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f32_fpext_bf16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s4, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b32 s3, s3, 16
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_bfi_b32 v2, s4, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %sign.ext = fpext bfloat %sign to float
+  %result = call float @llvm.copysign.f32(float %mag, float %sign.ext)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+declare float @llvm.copysign.f32(float, float) #0
+declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>) #0
+declare <3 x float> @llvm.copysign.v3f32(<3 x float>, <3 x float>) #0
+declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) #0
+declare <5 x float> @llvm.copysign.v5f32(<5 x float>, <5 x float>) #0
+
+attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }

diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f32.r600.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f32.r600.ll
new file mode 100644
index 000000000000..d96941f23ba0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f32.r600.ll
@@ -0,0 +1,30 @@
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
+
+declare float @llvm.copysign.f32(float, float) nounwind readnone
+declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) nounwind readnone
+
+; EG: BFI_INT
+define amdgpu_kernel void @test_copysign_f32(ptr addrspace(1) %out, float %mag, float %sign) nounwind {
+  %result = call float @llvm.copysign.f32(float %mag, float %sign)
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; EG: BFI_INT
+; EG: BFI_INT
+define amdgpu_kernel void @test_copysign_v2f32(ptr addrspace(1) %out, <2 x float> %mag, <2 x float> %sign) nounwind {
+  %result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> %sign)
+  store <2 x float> %result, ptr addrspace(1) %out, align 8
+  ret void
+}
+
+; EG: BFI_INT
+; EG: BFI_INT
+; EG: BFI_INT
+; EG: BFI_INT
+define amdgpu_kernel void @test_copysign_v4f32(ptr addrspace(1) %out, <4 x float> %mag, <4 x float> %sign) nounwind {
+  %result = call <4 x float> @llvm.copysign.v4f32(<4 x float> %mag, <4 x float> %sign)
+  store <4 x float> %result, ptr addrspace(1) %out, align 16
+  ret void
+}

diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll
index 7ca233446732..97dd018b677c 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll
@@ -1,57 +1,766 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
-
-declare double @llvm.copysign.f64(double, double) nounwind readnone
-declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) nounwind readnone
-declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>) nounwind readnone
-
-; FUNC-LABEL: {{^}}test_copysign_f64:
-; SI-DAG: s_load_dwordx2 s[[[SMAG_LO:[0-9]+]]:[[SMAG_HI:[0-9]+]]], s{{\[[0-9]+:[0-9]+\]}}, 0x13
-; SI-DAG: s_load_dwordx2 s[[[SSIGN_LO:[0-9]+]]:[[SSIGN_HI:[0-9]+]]], s{{\[[0-9]+:[0-9]+\]}}, 0x1d
-; VI-DAG: s_load_dwordx2 s[[[SMAG_LO:[0-9]+]]:[[SMAG_HI:[0-9]+]]], s{{\[[0-9]+:[0-9]+\]}}, 0x4c
-; VI-DAG: s_load_dwordx2 s[[[SSIGN_LO:[0-9]+]]:[[SSIGN_HI:[0-9]+]]], s{{\[[0-9]+:[0-9]+\]}}, 0x74
-; GCN-DAG: v_mov_b32_e32 v[[VSIGN_HI:[0-9]+]], s[[SSIGN_HI]]
-; GCN-DAG: v_mov_b32_e32 v[[VMAG_HI:[0-9]+]], s[[SMAG_HI]]
-; GCN-DAG: s_brev_b32 [[SCONST:s[0-9]+]], -2
-; GCN-DAG: v_bfi_b32 v[[VRESULT_HI:[0-9]+]], [[SCONST]], v[[VMAG_HI]], v[[VSIGN_HI]]
-; GCN-DAG: v_mov_b32_e32 v[[VMAG_LO:[0-9]+]], s[[SMAG_LO]]
-; GCN: buffer_store_dwordx2 v[[[VMAG_LO]]:[[VRESULT_HI]]]
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_f64(ptr addrspace(1) %out, [8 x i32], double %mag, [8 x i32], double %sign) nounwind {
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=GCN,VI %s
+
+declare double @llvm.copysign.f64(double, double) #0
+declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) #0
+declare <3 x double> @llvm.copysign.v3f64(<3 x double>, <3 x double>) #0
+declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>) #0
+
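+; Only the high dword of an f64 carries the sign, so a single v_bfi_b32 on
+; the high half suffices; the low dword passes through.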
+define amdgpu_kernel void @s_test_copysign_f64(ptr addrspace(1) %out, [8 x i32], double %mag, [8 x i32], double %sign) {
+; SI-LABEL: s_test_copysign_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x13
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x1d
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s0, -2
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_bfi_b32 v1, s0, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x4c
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x74
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_brev_b32 s4, -2
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_bfi_b32 v1, s4, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %result = call double @llvm.copysign.f64(double %mag, double %sign)
   store double %result, ptr addrspace(1) %out, align 8
   ret void
 }
 
-; FUNC-LABEL: {{^}}test_copysign_f64_f32:
-; SI-DAG: s_load_dwordx2 s[[[SMAG_LO:[0-9]+]]:[[SMAG_HI:[0-9]+]]], s{{\[[0-9]+:[0-9]+\]}}, 0x13
-; VI-DAG: s_load_dwordx2 s[[[SMAG_LO:[0-9]+]]:[[SMAG_HI:[0-9]+]]], s{{\[[0-9]+:[0-9]+\]}}, 0x4c
-; GCN-DAG: s_load_dword s[[SSIGN:[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}
-; GCN-DAG: s_brev_b32 [[SCONST:s[0-9]+]], -2{{$}}
-; GCN-DAG: v_mov_b32_e32 v[[VMAG_HI:[0-9]+]], s[[SMAG_HI]]
-; GCN-DAG: v_mov_b32_e32 v[[VSIGN:[0-9]+]], s[[SSIGN]]
-; GCN-DAG: v_bfi_b32 v[[VRESULT_HI:[0-9]+]], [[SCONST]], v[[VMAG_HI]], v[[VSIGN]]
-; GCN-DAG: v_mov_b32_e32 v[[VMAG_LO:[0-9]+]], s[[SMAG_LO]]
-; GCN: buffer_store_dwordx2 v[[[VMAG_LO]]:[[VRESULT_HI]]]
-define amdgpu_kernel void @test_copysign_f64_f32(ptr addrspace(1) %out, [8 x i32], double %mag, float %sign) nounwind {
-  %c = fpext float %sign to double
-  %result = call double @llvm.copysign.f64(double %mag, double %c)
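+; Constant f64 signs reduce to bit set/clear on the high dword, matching the
+; f32 cases.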
+define amdgpu_kernel void @s_test_copysign_f64_0(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; SI-LABEL: s_test_copysign_f64_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x13
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset0_b32 s5, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x4c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset0_b32 s3, 31
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double %mag, double 0.0)
+  store double %result, ptr addrspace(1) %out, align 8
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_1(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; SI-LABEL: s_test_copysign_f64_1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x13
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset0_b32 s5, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x4c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset0_b32 s3, 31
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double %mag, double 1.0)
+  store double %result, ptr addrspace(1) %out, align 8
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_10(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; SI-LABEL: s_test_copysign_f64_10:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x13
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset0_b32 s5, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_10:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x4c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset0_b32 s3, 31
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double %mag, double 10.0)
+  store double %result, ptr addrspace(1) %out, align 8
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_neg1(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; SI-LABEL: s_test_copysign_f64_neg1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x13
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset1_b32 s5, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_neg1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x4c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset1_b32 s3, 31
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double %mag, double -1.0)
+  store double %result, ptr addrspace(1) %out, align 8
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_neg10(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; SI-LABEL: s_test_copysign_f64_neg10:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x13
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitset1_b32 s5, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_neg10:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x4c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitset1_b32 s3, 31
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double %mag, double -10.0)
   store double %result, ptr addrspace(1) %out, align 8
   ret void
 }
 
-; FUNC-LABEL: {{^}}test_copysign_v2f64:
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_v2f64(ptr addrspace(1) %out, <2 x double> %mag, <2 x double> %sign) nounwind {
+define amdgpu_kernel void @s_test_copysign_f64_f32(ptr addrspace(1) %out, [8 x i32], double %mag, [8 x i32], float %sign) {
+; SI-LABEL: s_test_copysign_f64_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x13
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x1d
+; SI-NEXT:    s_brev_b32 s1, -2
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_mov_b32_e32 v1, s0
+; SI-NEXT:    v_bfi_b32 v1, s1, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x4c
+; VI-NEXT:    s_load_dword s4, s[0:1], 0x74
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s5, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_bfi_b32 v1, s5, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+  %sign.ext = fpext float %sign to double
+  %result = call double @llvm.copysign.f64(double %mag, double %sign.ext)
+  store double %result, ptr addrspace(1) %out, align 8
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_f16(ptr addrspace(1) %out, [8 x i32], double %mag, [8 x i32], half %sign) {
+; SI-LABEL: s_test_copysign_f64_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[0:1], 0x1d
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x13
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_bfi_b32 v1, s2, v1, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s4, s[0:1], 0x74
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x4c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s5, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e64 v0, 16, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_bfi_b32 v1, s5, v1, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+  %sign.ext = fpext half %sign to double
+  %result = call double @llvm.copysign.f64(double %mag, double %sign.ext)
+  store double %result, ptr addrspace(1) %out, align 8
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_0_mag(ptr addrspace(1) %out, double %sign) {
+; SI-LABEL: s_test_copysign_f64_0_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_bfi_b32 v1, s2, 0, v0
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_0_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_bfi_b32 v3, s2, 0, v2
+; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double 0.0, double %sign)
+  store double %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_1_mag(ptr addrspace(1) %out, double %sign) {
+; SI-LABEL: s_test_copysign_f64_1_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_mov_b32_e32 v0, 0x3ff00000
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_1_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    v_mov_b32_e32 v2, 0x3ff00000
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_bfi_b32 v3, s2, v2, v3
+; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double 1.0, double %sign)
+  store double %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_10_mag(ptr addrspace(1) %out, double %sign) {
+; SI-LABEL: s_test_copysign_f64_10_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_mov_b32_e32 v0, 0x40240000
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_10_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    v_mov_b32_e32 v2, 0x40240000
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_bfi_b32 v3, s2, v2, v3
+; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double 10.0, double %sign)
+  store double %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_neg1_mag(ptr addrspace(1) %out, double %sign) {
+; SI-LABEL: s_test_copysign_f64_neg1_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_mov_b32_e32 v0, 0xbff00000
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_neg1_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    v_mov_b32_e32 v2, 0xbff00000
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_bfi_b32 v3, s2, v2, v3
+; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double -1.0, double %sign)
+  store double %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_f64_neg10_mag(ptr addrspace(1) %out, double %sign) {
+; SI-LABEL: s_test_copysign_f64_neg10_mag:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_mov_b32_e32 v0, 0xc0240000
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_f64_neg10_mag:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    v_mov_b32_e32 v2, 0xc0240000
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_bfi_b32 v3, s2, v2, v3
+; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    s_endpgm
+  %result = call double @llvm.copysign.f64(double -10.0, double %sign)
+  store double %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_v2f64(ptr addrspace(1) %out, <2 x double> %mag, <2 x double> %sign) {
+; SI-LABEL: s_test_copysign_v2f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s8, -2
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_bfi_b32 v3, s8, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_bfi_b32 v1, s8, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_v2f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_bfi_b32 v3, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; VI-NEXT:    s_endpgm
   %result = call <2 x double> @llvm.copysign.v2f64(<2 x double> %mag, <2 x double> %sign)
-  store <2 x double> %result, ptr addrspace(1) %out, align 8
+  store <2 x double> %result, ptr addrspace(1) %out, align 16
+  ret void
+}
+
+define amdgpu_kernel void @s_test_copysign_v3f64(ptr addrspace(1) %out, <3 x double> %mag, <3 x double> %sign) {
+; SI-LABEL: s_test_copysign_v3f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx16 s[4:19], s[0:1], 0x11
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s10, -2
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_mov_b32_e32 v1, s15
+; SI-NEXT:    v_mov_b32_e32 v4, s9
+; SI-NEXT:    v_mov_b32_e32 v5, s17
+; SI-NEXT:    v_bfi_b32 v3, s10, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_mov_b32_e32 v1, s13
+; SI-NEXT:    v_bfi_b32 v5, s10, v4, v5
+; SI-NEXT:    v_mov_b32_e32 v4, s8
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_bfi_b32 v1, s10, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_v3f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx16 s[4:19], s[0:1], 0x44
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v1, s15
+; VI-NEXT:    v_bfi_b32 v3, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v1, s13
+; VI-NEXT:    v_mov_b32_e32 v4, s9
+; VI-NEXT:    v_mov_b32_e32 v5, s17
+; VI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; VI-NEXT:    v_bfi_b32 v5, s2, v4, v5
+; VI-NEXT:    s_add_u32 s2, s0, 16
+; VI-NEXT:    s_addc_u32 s3, s1, 0
+; VI-NEXT:    v_mov_b32_e32 v7, s3
+; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    v_mov_b32_e32 v6, s2
+; VI-NEXT:    flat_store_dwordx2 v[6:7], v[4:5]
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; VI-NEXT:    s_endpgm
+  %result = call <3 x double> @llvm.copysign.v3f64(<3 x double> %mag, <3 x double> %sign)
+  store <3 x double> %result, ptr addrspace(1) %out, align 32
   ret void
 }
 
-; FUNC-LABEL: {{^}}test_copysign_v4f64:
-; GCN: s_endpgm
-define amdgpu_kernel void @test_copysign_v4f64(ptr addrspace(1) %out, <4 x double> %mag, <4 x double> %sign) nounwind {
+define amdgpu_kernel void @s_test_copysign_v4f64(ptr addrspace(1) %out, <4 x double> %mag, <4 x double> %sign) {
+; SI-LABEL: s_test_copysign_v4f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx16 s[4:19], s[0:1], 0x11
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s12, -2
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v4, s11
+; SI-NEXT:    v_mov_b32_e32 v5, s19
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_mov_b32_e32 v1, s15
+; SI-NEXT:    v_bfi_b32 v7, s12, v4, v5
+; SI-NEXT:    v_mov_b32_e32 v4, s9
+; SI-NEXT:    v_mov_b32_e32 v5, s17
+; SI-NEXT:    v_bfi_b32 v3, s12, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_mov_b32_e32 v1, s13
+; SI-NEXT:    v_mov_b32_e32 v6, s10
+; SI-NEXT:    v_bfi_b32 v5, s12, v4, v5
+; SI-NEXT:    v_mov_b32_e32 v4, s8
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_bfi_b32 v1, s12, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_test_copysign_v4f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx16 s[4:19], s[0:1], 0x44
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_brev_b32 s2, -2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v1, s15
+; VI-NEXT:    v_mov_b32_e32 v4, s11
+; VI-NEXT:    v_mov_b32_e32 v5, s19
+; VI-NEXT:    v_bfi_b32 v3, s2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v1, s13
+; VI-NEXT:    v_bfi_b32 v7, s2, v4, v5
+; VI-NEXT:    v_mov_b32_e32 v4, s9
+; VI-NEXT:    v_mov_b32_e32 v5, s17
+; VI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; VI-NEXT:    v_bfi_b32 v5, s2, v4, v5
+; VI-NEXT:    s_add_u32 s2, s0, 16
+; VI-NEXT:    s_addc_u32 s3, s1, 0
+; VI-NEXT:    v_mov_b32_e32 v9, s3
+; VI-NEXT:    v_mov_b32_e32 v6, s10
+; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    v_mov_b32_e32 v8, s2
+; VI-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; VI-NEXT:    s_endpgm
   %result = call <4 x double> @llvm.copysign.v4f64(<4 x double> %mag, <4 x double> %sign)
-  store <4 x double> %result, ptr addrspace(1) %out, align 8
+  store <4 x double> %result, ptr addrspace(1) %out, align 32
   ret void
 }
+
+define double @v_test_copysign_f64(ptr addrspace(1) %out, [8 x i32], double %mag, [8 x i32], double %sign) {
+; GCN-LABEL: v_test_copysign_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_mov_b32_e32 v0, v10
+; GCN-NEXT:    v_bfi_b32 v1, s4, v11, v21
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call double @llvm.copysign.f64(double %mag, double %sign)
+  ret double %result
+}
+
+define double @v_test_copysign_f64_0(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; GCN-LABEL: v_test_copysign_f64_0:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v10
+; GCN-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v11
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call double @llvm.copysign.f64(double %mag, double 0.0)
+  ret double %result
+}
+
+define double @v_test_copysign_f64_1(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; GCN-LABEL: v_test_copysign_f64_1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v10
+; GCN-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v11
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call double @llvm.copysign.f64(double %mag, double 1.0)
+  ret double %result
+}
+
+define double @v_test_copysign_f64_10(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; GCN-LABEL: v_test_copysign_f64_10:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v10
+; GCN-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v11
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call double @llvm.copysign.f64(double %mag, double 10.0)
+  ret double %result
+}
+
+define double @v_test_copysign_f64_neg1(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; GCN-LABEL: v_test_copysign_f64_neg1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v10
+; GCN-NEXT:    v_or_b32_e32 v1, 0x80000000, v11
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call double @llvm.copysign.f64(double %mag, double -1.0)
+  ret double %result
+}
+
+define double @v_test_copysign_f64_neg10(ptr addrspace(1) %out, [8 x i32], double %mag) {
+; GCN-LABEL: v_test_copysign_f64_neg10:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v10
+; GCN-NEXT:    v_or_b32_e32 v1, 0x80000000, v11
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call double @llvm.copysign.f64(double %mag, double -10.0)
+  ret double %result
+}
+
+define double @v_test_copysign_f64_f32(ptr addrspace(1) %out, [8 x i32], double %mag, [8 x i32], float %sign) {
+; GCN-LABEL: v_test_copysign_f64_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_mov_b32_e32 v0, v10
+; GCN-NEXT:    v_bfi_b32 v1, s4, v11, v20
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %sign.ext = fpext float %sign to double
+  %result = call double @llvm.copysign.f64(double %mag, double %sign.ext)
+  ret double %result
+}
+
+define double @v_test_copysign_f64_f16(ptr addrspace(1) %out, [8 x i32], double %mag, [8 x i32], half %sign) {
+; SI-LABEL: v_test_copysign_f64_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_brev_b32 s4, -2
+; SI-NEXT:    v_mov_b32_e32 v0, v10
+; SI-NEXT:    v_bfi_b32 v1, s4, v11, v20
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_test_copysign_f64_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v20
+; VI-NEXT:    s_brev_b32 s4, -2
+; VI-NEXT:    v_mov_b32_e32 v0, v10
+; VI-NEXT:    v_bfi_b32 v1, s4, v11, v1
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %sign.ext = fpext half %sign to double
+  %result = call double @llvm.copysign.f64(double %mag, double %sign.ext)
+  ret double %result
+}
+
+define <2 x double> @v_test_copysign_v2f64(ptr addrspace(1) %out, <2 x double> %mag, <2 x double> %sign) {
+; GCN-LABEL: v_test_copysign_v2f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_mov_b32_e32 v0, v2
+; GCN-NEXT:    v_bfi_b32 v1, s4, v3, v7
+; GCN-NEXT:    v_bfi_b32 v3, s4, v5, v9
+; GCN-NEXT:    v_mov_b32_e32 v2, v4
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call <2 x double> @llvm.copysign.v2f64(<2 x double> %mag, <2 x double> %sign)
+  ret <2 x double> %result
+}
+
+define <3 x double> @v_test_copysign_v3f64(ptr addrspace(1) %out, <3 x double> %mag, <3 x double> %sign) {
+; GCN-LABEL: v_test_copysign_v3f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_mov_b32_e32 v0, v2
+; GCN-NEXT:    v_bfi_b32 v1, s4, v3, v9
+; GCN-NEXT:    v_bfi_b32 v3, s4, v5, v11
+; GCN-NEXT:    v_bfi_b32 v5, s4, v7, v13
+; GCN-NEXT:    v_mov_b32_e32 v2, v4
+; GCN-NEXT:    v_mov_b32_e32 v4, v6
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call <3 x double> @llvm.copysign.v3f64(<3 x double> %mag, <3 x double> %sign)
+  ret <3 x double> %result
+}
+
+define <4 x double> @v_test_copysign_v4f64(ptr addrspace(1) %out, <4 x double> %mag, <4 x double> %sign) {
+; GCN-LABEL: v_test_copysign_v4f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, -2
+; GCN-NEXT:    v_mov_b32_e32 v0, v2
+; GCN-NEXT:    v_bfi_b32 v1, s4, v3, v11
+; GCN-NEXT:    v_bfi_b32 v3, s4, v5, v13
+; GCN-NEXT:    v_bfi_b32 v5, s4, v7, v15
+; GCN-NEXT:    v_bfi_b32 v7, s4, v9, v17
+; GCN-NEXT:    v_mov_b32_e32 v2, v4
+; GCN-NEXT:    v_mov_b32_e32 v4, v6
+; GCN-NEXT:    v_mov_b32_e32 v6, v8
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = call <4 x double> @llvm.copysign.v4f64(<4 x double> %mag, <4 x double> %sign)
+  ret <4 x double> %result
+}
+
+attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
