[llvm] 09fc311 - AMDGPU/GlobalISel: Mostly fix BFI patterns

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 26 12:07:00 PST 2022


Author: Matt Arsenault
Date: 2022-01-26T15:06:50-05:00
New Revision: 09fc311af702e06fbb7a89cdee13a61face102ed

URL: https://github.com/llvm/llvm-project/commit/09fc311af702e06fbb7a89cdee13a61face102ed
DIFF: https://github.com/llvm/llvm-project/commit/09fc311af702e06fbb7a89cdee13a61face102ed.diff

LOG: AMDGPU/GlobalISel: Mostly fix BFI patterns

Most importantly, this fixes constant bus errors in the 64-bit cases. It's
surprising to me that these were passing the selection test at all with
SReg_* sources. Also fixes pattern matching in the 32-bit cases with
simple register operands.
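
For reference, the basic pattern being selected is the bitfield-insert
idiom from the ISA docs, (y & x) | (z & ~x). In IR (a minimal sketch
along the lines of the v_bfi_def_i32 test in bfi_int.ll) that is:

  define i32 @bfi_def(i32 %x, i32 %y, i32 %z) {
    %not.x = xor i32 %x, -1       ; ~x
    %and.z = and i32 %z, %not.x   ; z & ~x
    %and.y = and i32 %y, %x       ; y & x
    %or = or i32 %and.y, %and.z   ; (y & x) | (z & ~x)
    ret i32 %or
  }

With all-VGPR, divergent inputs this should select to a single
v_bfi_b32 v0, v0, v1, v2; using VSrc_b32/VReg_64 operand classes keeps
the 64-bit expansion from feeding multiple SGPRs into one VALU
instruction and violating the constant bus restriction.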

These patterns still aren't working in a few cases, such as with mixed
SGPR inputs. The patterns aren't looking through the SGPR->VGPR copies
as they need to. The vector cases also have some unmerges of
build_vector that obscure the inputs.
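
As an example of the remaining gap, the new check lines for the
mixed-operand v_s_s_bfi_sha256_ch case show GlobalISel still falling
back to the expanded xor/and/xor sequence where the DAG path selects a
single BFI:

  ; GFX10 (SDag):  v_bfi_b32 v0, v0, s0, s1
  ; GFX10-GISEL:   s_xor_b32 s0, s0, s1
  ; GFX10-GISEL:   v_and_b32_e32 v0, s0, v0
  ; GFX10-GISEL:   v_xor_b32_e32 v0, s1, v0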

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIInstructions.td
    llvm/test/CodeGen/AMDGPU/bfi_int.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index cba9a77864aa..7be63ae6964b 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1810,44 +1810,44 @@ def BFIImm32 : PatFrag<
 // (y & x) | (z & ~x)
 def : AMDGPUPat <
   (DivergentBinFrag<or> (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
-  (V_BFI_B32_e64 $x, $y, $z)
+  (V_BFI_B32_e64 VSrc_b32:$x, VSrc_b32:$y, VSrc_b32:$z)
 >;
 
 // (y & C) | (z & ~C)
 def : AMDGPUPat <
   (BFIImm32 i32:$x, i32:$y, i32:$z),
-  (V_BFI_B32_e64 $x, $y, $z)
+  (V_BFI_B32_e64 VSrc_b32:$x, VSrc_b32:$y, VSrc_b32:$z)
 >;
 
 // 64-bit version
 def : AMDGPUPat <
   (DivergentBinFrag<or> (and i64:$y, i64:$x), (and i64:$z, (not i64:$x))),
-  (REG_SEQUENCE SReg_64,
-    (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub0)),
-               (i32 (EXTRACT_SUBREG SReg_64:$y, sub0)),
-               (i32 (EXTRACT_SUBREG SReg_64:$z, sub0))), sub0,
-    (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub1)),
-               (i32 (EXTRACT_SUBREG SReg_64:$y, sub1)),
-               (i32 (EXTRACT_SUBREG SReg_64:$z, sub1))), sub1)
+  (REG_SEQUENCE VReg_64,
+    (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub0)),
+               (i32 (EXTRACT_SUBREG VReg_64:$y, sub0)),
+               (i32 (EXTRACT_SUBREG VReg_64:$z, sub0))), sub0,
+    (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub1)),
+               (i32 (EXTRACT_SUBREG VReg_64:$y, sub1)),
+               (i32 (EXTRACT_SUBREG VReg_64:$z, sub1))), sub1)
 >;
 
 // SHA-256 Ch function
 // z ^ (x & (y ^ z))
 def : AMDGPUPat <
   (DivergentBinFrag<xor> i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
-  (V_BFI_B32_e64 $x, $y, $z)
+  (V_BFI_B32_e64 VSrc_b32:$x, VSrc_b32:$y, VSrc_b32:$z)
 >;
 
 // 64-bit version
 def : AMDGPUPat <
   (DivergentBinFrag<xor> i64:$z, (and i64:$x, (xor i64:$y, i64:$z))),
-  (REG_SEQUENCE SReg_64,
-    (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub0)),
-               (i32 (EXTRACT_SUBREG SReg_64:$y, sub0)),
-               (i32 (EXTRACT_SUBREG SReg_64:$z, sub0))), sub0,
-    (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub1)),
-               (i32 (EXTRACT_SUBREG SReg_64:$y, sub1)),
-               (i32 (EXTRACT_SUBREG SReg_64:$z, sub1))), sub1)
+  (REG_SEQUENCE VReg_64,
+    (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub0)),
+               (i32 (EXTRACT_SUBREG VReg_64:$y, sub0)),
+               (i32 (EXTRACT_SUBREG VReg_64:$z, sub0))), sub0,
+    (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub1)),
+               (i32 (EXTRACT_SUBREG VReg_64:$y, sub1)),
+               (i32 (EXTRACT_SUBREG VReg_64:$z, sub1))), sub1)
 >;
 
 def : AMDGPUPat <
@@ -2773,21 +2773,21 @@ def : AMDGPUPat <
 def : AMDGPUPat <
   (DivergentBinFrag<or> (and i32:$x, i32:$z),
                         (and i32:$y, (or i32:$x, i32:$z))),
-  (V_BFI_B32_e64 (V_XOR_B32_e64 i32:$x, i32:$y), i32:$z, i32:$y)
+  (V_BFI_B32_e64 (V_XOR_B32_e64 VSrc_b32:$x, VSrc_b32:$y), VSrc_b32:$z, VSrc_b32:$y)
 >;
 
 def : AMDGPUPat <
   (DivergentBinFrag<or> (and i64:$x, i64:$z),
                         (and i64:$y, (or i64:$x, i64:$z))),
-  (REG_SEQUENCE SReg_64,
-    (V_BFI_B32_e64 (V_XOR_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub0)),
-                    (i32 (EXTRACT_SUBREG SReg_64:$y, sub0))),
-               (i32 (EXTRACT_SUBREG SReg_64:$z, sub0)),
-               (i32 (EXTRACT_SUBREG SReg_64:$y, sub0))), sub0,
-    (V_BFI_B32_e64 (V_XOR_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub1)),
-                    (i32 (EXTRACT_SUBREG SReg_64:$y, sub1))),
-               (i32 (EXTRACT_SUBREG SReg_64:$z, sub1)),
-               (i32 (EXTRACT_SUBREG SReg_64:$y, sub1))), sub1)
+  (REG_SEQUENCE VReg_64,
+    (V_BFI_B32_e64 (V_XOR_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub0)),
+                    (i32 (EXTRACT_SUBREG VReg_64:$y, sub0))),
+               (i32 (EXTRACT_SUBREG VReg_64:$z, sub0)),
+               (i32 (EXTRACT_SUBREG VReg_64:$y, sub0))), sub0,
+    (V_BFI_B32_e64 (V_XOR_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub1)),
+                    (i32 (EXTRACT_SUBREG VReg_64:$y, sub1))),
+               (i32 (EXTRACT_SUBREG VReg_64:$z, sub1)),
+               (i32 (EXTRACT_SUBREG VReg_64:$y, sub1))), sub1)
 >;
 
 multiclass IntMed3Pat<Instruction med3Inst,

diff --git a/llvm/test/CodeGen/AMDGPU/bfi_int.ll b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
index 35b13804990a..ab8648f19885 100644
--- a/llvm/test/CodeGen/AMDGPU/bfi_int.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
@@ -2,6 +2,8 @@
 ; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX7 %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX8 %s
 ; RUN: llc -march=amdgcn -mcpu=gfx1031 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX10 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX8-GISEL %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1031 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX10-GISEL %s
 
 ; BFI_INT Definition pattern from ISA docs
 ; (y & x) | (z & ~x)
@@ -51,6 +53,36 @@ define amdgpu_kernel void @s_bfi_def_i32(i32 addrspace(1)* %out, i32 %x, i32 %y,
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX10-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX10-NEXT:    s_endpgm
+;
+; GFX8-GISEL-LABEL: s_bfi_def_i32:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; GFX8-GISEL-NEXT:    s_load_dword s4, s[0:1], 0x34
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX8-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-GISEL-NEXT:    s_andn2_b32 s4, s4, s2
+; GFX8-GISEL-NEXT:    s_and_b32 s2, s3, s2
+; GFX8-GISEL-NEXT:    s_or_b32 s2, s4, s2
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-GISEL-NEXT:    s_endpgm
+;
+; GFX10-GISEL-LABEL: s_bfi_def_i32:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_clause 0x2
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; GFX10-GISEL-NEXT:    s_load_dword s4, s[0:1], 0x34
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_andn2_b32 s4, s4, s2
+; GFX10-GISEL-NEXT:    s_and_b32 s2, s3, s2
+; GFX10-GISEL-NEXT:    s_or_b32 s2, s4, s2
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10-GISEL-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX10-GISEL-NEXT:    s_endpgm
 entry:
   %0 = xor i32 %x, -1
   %1 = and i32 %z, %0
@@ -79,6 +111,19 @@ define i32 @v_bfi_def_i32(i32 %x, i32 %y, i32 %z) {
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, v1, v2
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-GISEL-LABEL: v_bfi_def_i32:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-GISEL-NEXT:    v_bfi_b32 v0, v0, v1, v2
+; GFX8-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: v_bfi_def_i32:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT:    v_bfi_b32 v0, v0, v1, v2
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %0 = xor i32 %x, -1
   %1 = and i32 %z, %0
@@ -134,6 +179,36 @@ define amdgpu_kernel void @s_bfi_sha256_ch(i32 addrspace(1)* %out, i32 %x, i32 %
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX10-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX10-NEXT:    s_endpgm
+;
+; GFX8-GISEL-LABEL: s_bfi_sha256_ch:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; GFX8-GISEL-NEXT:    s_load_dword s4, s[0:1], 0x34
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX8-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-GISEL-NEXT:    s_xor_b32 s3, s3, s4
+; GFX8-GISEL-NEXT:    s_and_b32 s2, s2, s3
+; GFX8-GISEL-NEXT:    s_xor_b32 s2, s4, s2
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-GISEL-NEXT:    s_endpgm
+;
+; GFX10-GISEL-LABEL: s_bfi_sha256_ch:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_clause 0x2
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; GFX10-GISEL-NEXT:    s_load_dword s4, s[0:1], 0x34
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_xor_b32 s3, s3, s4
+; GFX10-GISEL-NEXT:    s_and_b32 s2, s2, s3
+; GFX10-GISEL-NEXT:    s_xor_b32 s2, s4, s2
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10-GISEL-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX10-GISEL-NEXT:    s_endpgm
 entry:
   %0 = xor i32 %y, %z
   %1 = and i32 %x, %0
@@ -161,6 +236,19 @@ define i32 @v_bfi_sha256_ch(i32 %x, i32 %y, i32 %z) {
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, v1, v2
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-GISEL-LABEL: v_bfi_sha256_ch:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-GISEL-NEXT:    v_bfi_b32 v0, v0, v1, v2
+; GFX8-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: v_bfi_sha256_ch:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT:    v_bfi_b32 v0, v0, v1, v2
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %0 = xor i32 %y, %z
   %1 = and i32 %x, %0
@@ -185,6 +273,20 @@ define amdgpu_ps float @v_s_s_bfi_sha256_ch(i32 %x, i32 inreg %y, i32 inreg %z)
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, s0, s1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: v_s_s_bfi_sha256_ch:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_xor_b32 s0, s0, s1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, s1, v0
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: v_s_s_bfi_sha256_ch:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_xor_b32 s0, s0, s1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, s1, v0
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %xor0 = xor i32 %y, %z
   %and = and i32 %x, %xor0
@@ -210,6 +312,20 @@ define amdgpu_ps float @s_v_s_bfi_sha256_ch(i32 inreg %x, i32 %y, i32 inreg %z)
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_bfi_b32 v0, s0, v0, s1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_v_s_bfi_sha256_ch:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, s1, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, s1, v0
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_v_s_bfi_sha256_ch:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, s1, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, s1, v0
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %xor0 = xor i32 %y, %z
   %and = and i32 %x, %xor0
@@ -235,6 +351,17 @@ define amdgpu_ps float @s_s_v_bfi_sha256_ch(i32 inreg %x, i32 inreg %y, i32 %z)
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_bfi_b32 v0, s0, s1, v0
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_s_v_bfi_sha256_ch:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v1, s0
+; GFX8-GISEL-NEXT:    v_bfi_b32 v0, v1, s1, v0
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_s_v_bfi_sha256_ch:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    v_bfi_b32 v0, s0, s1, v0
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %xor0 = xor i32 %y, %z
   %and = and i32 %x, %xor0
@@ -258,6 +385,16 @@ define amdgpu_ps float @s_v_v_bfi_sha256_ch(i32 inreg %x, i32 %y, i32 %z) {
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_bfi_b32 v0, s0, v0, v1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_v_v_bfi_sha256_ch:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    v_bfi_b32 v0, s0, v0, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_v_v_bfi_sha256_ch:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    v_bfi_b32 v0, s0, v0, v1
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %xor0 = xor i32 %y, %z
   %and = and i32 %x, %xor0
@@ -281,6 +418,16 @@ define amdgpu_ps float @v_s_v_bfi_sha256_ch(i32 %x, i32 inreg %y, i32 %z) {
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, s0, v1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: v_s_v_bfi_sha256_ch:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    v_bfi_b32 v0, v0, s0, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: v_s_v_bfi_sha256_ch:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    v_bfi_b32 v0, v0, s0, v1
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %xor0 = xor i32 %y, %z
   %and = and i32 %x, %xor0
@@ -304,6 +451,20 @@ define amdgpu_ps float @v_v_s_bfi_sha256_ch(i32 %x, i32 %y, i32 inreg %z) {
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, v1, s0
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: v_v_s_bfi_sha256_ch:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, s0, v1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, v0, v1
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: v_v_s_bfi_sha256_ch:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, s0, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, v0, v1
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %xor0 = xor i32 %y, %z
   %and = and i32 %x, %xor0
@@ -362,6 +523,38 @@ define amdgpu_kernel void @s_bfi_sha256_ma(i32 addrspace(1)* %out, i32 %x, i32 %
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX10-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX10-NEXT:    s_endpgm
+;
+; GFX8-GISEL-LABEL: s_bfi_sha256_ma:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; GFX8-GISEL-NEXT:    s_load_dword s4, s[0:1], 0x34
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX8-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-GISEL-NEXT:    s_and_b32 s5, s2, s4
+; GFX8-GISEL-NEXT:    s_or_b32 s2, s2, s4
+; GFX8-GISEL-NEXT:    s_and_b32 s2, s3, s2
+; GFX8-GISEL-NEXT:    s_or_b32 s2, s5, s2
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-GISEL-NEXT:    s_endpgm
+;
+; GFX10-GISEL-LABEL: s_bfi_sha256_ma:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_clause 0x2
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; GFX10-GISEL-NEXT:    s_load_dword s4, s[0:1], 0x34
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_or_b32 s5, s2, s4
+; GFX10-GISEL-NEXT:    s_and_b32 s2, s2, s4
+; GFX10-GISEL-NEXT:    s_and_b32 s3, s3, s5
+; GFX10-GISEL-NEXT:    s_or_b32 s2, s2, s3
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10-GISEL-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX10-GISEL-NEXT:    s_endpgm
 entry:
   %0 = and i32 %x, %z
   %1 = or i32 %x, %z
@@ -393,6 +586,21 @@ define i32 @v_bfi_sha256_ma(i32 %x, i32 %y, i32 %z) {
 ; GFX10-NEXT:    v_xor_b32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, v2, v1
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-GISEL-LABEL: v_bfi_sha256_ma:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GFX8-GISEL-NEXT:    v_bfi_b32 v0, v0, v2, v1
+; GFX8-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: v_bfi_sha256_ma:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GFX10-GISEL-NEXT:    v_bfi_b32 v0, v0, v2, v1
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %0 = and i32 %x, %z
   %1 = or i32 %x, %z
@@ -423,6 +631,29 @@ define <2 x i32> @v_bitselect_v2i32_pat1(<2 x i32> %a, <2 x i32> %b, <2 x i32> %
 ; GFX10-NEXT:    v_bfi_b32 v0, v2, v0, v4
 ; GFX10-NEXT:    v_bfi_b32 v1, v3, v1, v5
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-GISEL-LABEL: v_bitselect_v2i32_pat1:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX8-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: v_bitselect_v2i32_pat1:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
   %xor.0 = xor <2 x i32> %a, %mask
   %and = and <2 x i32> %xor.0, %b
   %bitselect = xor <2 x i32> %and, %mask
@@ -451,6 +682,33 @@ define i64 @v_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, v2, v4
 ; GFX10-NEXT:    v_bfi_b32 v1, v1, v3, v5
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-GISEL-LABEL: v_bitselect_i64_pat_0:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v2, v0, v2
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v3, v1, v3
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, v0, v4
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, v1, v5
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v3, v1
+; GFX8-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: v_bitselect_i64_pat_0:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v6, -1, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v7, -1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, v6, v4
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, v7, v5
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
   %and0 = and i64 %a, %b
   %not.a = xor i64 %a, -1
   %and1 = and i64 %not.a, %mask
@@ -480,6 +738,30 @@ define amdgpu_ps <2 x float> @v_s_s_bitselect_i64_pat_0(i64 %a, i64 inreg %b, i6
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, s0, s2
 ; GFX10-NEXT:    v_bfi_b32 v1, v1, s1, s3
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: v_s_s_bitselect_i64_pat_0:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v2, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v3, s1, v1
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s3, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v3, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: v_s_s_bitselect_i64_pat_0:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v2, -1, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v3, -1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, s2, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, s3, v3
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
   %and0 = and i64 %a, %b
   %not.a = xor i64 %a, -1
   %and1 = and i64 %not.a, %mask
@@ -510,6 +792,24 @@ define amdgpu_ps <2 x float> @s_v_s_bitselect_i64_pat_0(i64 inreg %a, i64 %b, i6
 ; GFX10-NEXT:    v_bfi_b32 v0, s0, v0, s2
 ; GFX10-NEXT:    v_bfi_b32 v1, s1, v1, s3
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_v_s_bitselect_i64_pat_0:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    s_andn2_b64 s[0:1], s[2:3], s[0:1]
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_v_s_bitselect_i64_pat_0:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    s_andn2_b64 s[0:1], s[2:3], s[0:1]
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
   %and0 = and i64 %a, %b
   %not.a = xor i64 %a, -1
   %and1 = and i64 %not.a, %mask
@@ -540,6 +840,26 @@ define amdgpu_ps <2 x float> @s_s_v_bitselect_i64_pat_0(i64 inreg %a, i64 inreg
 ; GFX10-NEXT:    v_bfi_b32 v0, s0, s2, v0
 ; GFX10-NEXT:    v_bfi_b32 v1, s1, s3, v1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_s_v_bitselect_i64_pat_0:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    s_and_b64 s[2:3], s[0:1], s[2:3]
+; GFX8-GISEL-NEXT:    s_not_b64 s[0:1], s[0:1]
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, s2, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, s3, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_s_v_bitselect_i64_pat_0:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_not_b64 s[4:5], s[0:1]
+; GFX10-GISEL-NEXT:    s_and_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s5, v1
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
   %and0 = and i64 %a, %b
   %not.a = xor i64 %a, -1
   %and1 = and i64 %not.a, %mask
@@ -566,6 +886,30 @@ define amdgpu_ps <2 x float> @v_v_s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 inre
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, v2, s0
 ; GFX10-NEXT:    v_bfi_b32 v1, v1, v3, s1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: v_v_s_bitselect_i64_pat_0:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v2, v0, v2
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v3, v1, v3
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v3, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: v_v_s_bitselect_i64_pat_0:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v4, -1, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v5, -1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, s0, v4
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, s1, v5
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
   %and0 = and i64 %a, %b
   %not.a = xor i64 %a, -1
   %and1 = and i64 %not.a, %mask
@@ -592,6 +936,30 @@ define amdgpu_ps <2 x float> @v_s_v_bitselect_i64_pat_0(i64 %a, i64 inreg %b, i6
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, s0, v2
 ; GFX10-NEXT:    v_bfi_b32 v1, v1, s1, v3
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: v_s_v_bitselect_i64_pat_0:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v4, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v5, s1, v1
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v5, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: v_s_v_bitselect_i64_pat_0:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v4, -1, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v5, -1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, v4, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, v5, v3
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
   %and0 = and i64 %a, %b
   %not.a = xor i64 %a, -1
   %and1 = and i64 %not.a, %mask
@@ -618,6 +986,28 @@ define amdgpu_ps <2 x float> @s_v_v_bitselect_i64_pat_0(i64 inreg %a, i64 %b, i6
 ; GFX10-NEXT:    v_bfi_b32 v0, s0, v0, v2
 ; GFX10-NEXT:    v_bfi_b32 v1, s1, v1, v3
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_v_v_bitselect_i64_pat_0:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    s_not_b64 s[0:1], s[0:1]
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v2, s0, v2
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v3, s1, v3
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_v_v_bitselect_i64_pat_0:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_not_b64 s[2:3], s[0:1]
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, s2, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, s3, v3
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
   %and0 = and i64 %a, %b
   %not.a = xor i64 %a, -1
   %and1 = and i64 %not.a, %mask
@@ -648,6 +1038,29 @@ define i64 @v_bitselect_i64_pat_1(i64 %a, i64 %b, i64 %mask) {
 ; GFX10-NEXT:    v_bfi_b32 v0, v2, v0, v4
 ; GFX10-NEXT:    v_bfi_b32 v1, v3, v1, v5
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-GISEL-LABEL: v_bitselect_i64_pat_1:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX8-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: v_bitselect_i64_pat_1:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
   %xor.0 = xor i64 %a, %mask
   %and = and i64 %xor.0, %b
   %bitselect = xor i64 %and, %mask
@@ -676,6 +1089,26 @@ define amdgpu_ps <2 x float> @v_s_s_bitselect_i64_pat_1(i64 %a, i64 inreg %b, i6
 ; GFX10-NEXT:    v_bfi_b32 v0, s0, v0, s2
 ; GFX10-NEXT:    v_bfi_b32 v1, s1, v1, s3
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: v_s_s_bitselect_i64_pat_1:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: v_s_s_bitselect_i64_pat_1:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
   %xor.0 = xor i64 %a, %mask
   %and = and i64 %xor.0, %b
   %bitselect = xor i64 %and, %mask
@@ -705,6 +1138,26 @@ define amdgpu_ps <2 x float> @s_s_v_bitselect_i64_pat_1(i64 inreg %a, i64 inreg
 ; GFX10-NEXT:    v_bfi_b32 v0, s2, s0, v0
 ; GFX10-NEXT:    v_bfi_b32 v1, s3, s1, v1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_s_v_bitselect_i64_pat_1:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v2, s0, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v3, s1, v1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v2, s2, v2
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v3, s3, v3
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, v3, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_s_v_bitselect_i64_pat_1:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v2, s0, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v3, s1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, s2, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, s3, v3
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, v3, v1
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
   %xor.0 = xor i64 %a, %mask
   %and = and i64 %xor.0, %b
   %bitselect = xor i64 %and, %mask
@@ -734,6 +1187,24 @@ define amdgpu_ps <2 x float> @s_v_s_bitselect_i64_pat_1(i64 inreg %a, i64 %b, i6
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, s0, s2
 ; GFX10-NEXT:    v_bfi_b32 v1, v1, s1, s3
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_v_s_bitselect_i64_pat_1:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_v_s_bitselect_i64_pat_1:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
   %xor.0 = xor i64 %a, %mask
   %and = and i64 %xor.0, %b
   %bitselect = xor i64 %and, %mask
@@ -763,6 +1234,29 @@ define i64 @v_bitselect_i64_pat_2(i64 %a, i64 %b, i64 %mask) {
 ; GFX10-NEXT:    v_bfi_b32 v0, v2, v0, v4
 ; GFX10-NEXT:    v_bfi_b32 v1, v3, v1, v5
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-GISEL-LABEL: v_bitselect_i64_pat_2:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX8-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX8-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: v_bitselect_i64_pat_2:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX10-GISEL-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
   %xor.0 = xor i64 %a, %mask
   %and = and i64 %xor.0, %b
   %bitselect = xor i64 %and, %mask
@@ -797,6 +1291,33 @@ define i64 @v_bfi_sha256_ma_i64(i64 %x, i64 %y, i64 %z) {
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, v4, v2
 ; GFX10-NEXT:    v_bfi_b32 v1, v1, v5, v3
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-GISEL-LABEL: v_bfi_sha256_ma_i64:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v6, v0, v4
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v7, v1, v5
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, v2, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, v3, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v6, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v7, v1
+; GFX8-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: v_bfi_sha256_ma_i64:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v6, v0, v4
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v7, v1, v5
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, v0, v4
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, v1, v5
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, v2, v6
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, v3, v7
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %and0 = and i64 %x, %z
   %or0 = or i64 %x, %z
@@ -833,6 +1354,30 @@ define amdgpu_ps <2 x float> @v_s_s_bfi_sha256_ma_i64(i64 %x, i64 inreg %y, i64
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, s2, s0
 ; GFX10-NEXT:    v_bfi_b32 v1, v1, s3, s1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: v_s_s_bfi_sha256_ma_i64:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v2, s2, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v3, s3, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, s2, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, s3, v1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v3, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: v_s_s_bfi_sha256_ma_i64:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v2, s2, v0
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v3, s3, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s3, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, s0, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, s1, v3
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %and0 = and i64 %x, %z
   %or0 = or i64 %x, %z
@@ -866,6 +1411,26 @@ define amdgpu_ps <2 x float> @s_v_s_bfi_sha256_ma_i64(i64 inreg %x, i64 %y, i64
 ; GFX10-NEXT:    v_bfi_b32 v0, v2, s2, v0
 ; GFX10-NEXT:    v_bfi_b32 v1, v3, s3, v1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_v_s_bfi_sha256_ma_i64:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_and_b64 s[4:5], s[0:1], s[2:3]
+; GFX8-GISEL-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, s4, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, s5, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_v_s_bfi_sha256_ma_i64:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_or_b64 s[4:5], s[0:1], s[2:3]
+; GFX10-GISEL-NEXT:    s_and_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s5, v1
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %and0 = and i64 %x, %z
   %or0 = or i64 %x, %z
@@ -903,6 +1468,30 @@ define amdgpu_ps <2 x float> @s_s_v_bfi_sha256_ma_i64(i64 inreg %x, i64 inreg %y
 ; GFX10-NEXT:    v_bfi_b32 v0, v2, v0, s2
 ; GFX10-NEXT:    v_bfi_b32 v1, v3, v1, s3
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: s_s_v_bfi_sha256_ma_i64:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v2, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v3, s1, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s3, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v3, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: s_s_v_bfi_sha256_ma_i64:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v2, s0, v0
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v3, s1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, s2, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, s3, v3
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %and0 = and i64 %x, %z
   %or0 = or i64 %x, %z
@@ -936,6 +1525,30 @@ define amdgpu_ps <2 x float> @v_s_v_bfi_sha256_ma_i64(i64 %x, i64 inreg %y, i64
 ; GFX10-NEXT:    v_bfi_b32 v0, v0, v2, s0
 ; GFX10-NEXT:    v_bfi_b32 v1, v1, v3, s1
 ; GFX10-NEXT:    ; return to shader part epilog
+;
+; GFX8-GISEL-LABEL: v_s_v_bfi_sha256_ma_i64:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v4, v0, v2
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v5, v1, v3
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v0, s0, v0
+; GFX8-GISEL-NEXT:    v_and_b32_e32 v1, s1, v1
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v5, v1
+; GFX8-GISEL-NEXT:    ; return to shader part epilog
+;
+; GFX10-GISEL-LABEL: v_s_v_bfi_sha256_ma_i64:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v4, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v5, v1, v3
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v2, s0, v4
+; GFX10-GISEL-NEXT:    v_and_b32_e32 v3, s1, v5
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-GISEL-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-GISEL-NEXT:    ; return to shader part epilog
 entry:
   %and0 = and i64 %x, %z
   %or0 = or i64 %x, %z
@@ -993,6 +1606,43 @@ define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX10-NEXT:    s_endpgm
+;
+; GFX8-GISEL-LABEL: s_bitselect_i64_pat_0:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX8-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-GISEL-NEXT:    s_and_b64 s[2:3], s[4:5], s[6:7]
+; GFX8-GISEL-NEXT:    s_andn2_b64 s[0:1], s[0:1], s[4:5]
+; GFX8-GISEL-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX8-GISEL-NEXT:    s_add_u32 s0, s0, 10
+; GFX8-GISEL-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX8-GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX8-GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX8-GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8-GISEL-NEXT:    s_endpgm
+;
+; GFX10-GISEL-LABEL: s_bitselect_i64_pat_0:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_clause 0x1
+; GFX10-GISEL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX10-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_and_b64 s[2:3], s[4:5], s[6:7]
+; GFX10-GISEL-NEXT:    s_andn2_b64 s[0:1], s[0:1], s[4:5]
+; GFX10-GISEL-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX10-GISEL-NEXT:    s_add_u32 s0, s0, 10
+; GFX10-GISEL-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX10-GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX10-GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX10-GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX10-GISEL-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
+; GFX10-GISEL-NEXT:    s_endpgm
   %and0 = and i64 %a, %b
   %not.a = xor i64 %a, -1
   %and1 = and i64 %not.a, %mask
@@ -1050,6 +1700,43 @@ define amdgpu_kernel void @s_bitselect_i64_pat_1(i64 %a, i64 %b, i64 %mask) {
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX10-NEXT:    s_endpgm
+;
+; GFX8-GISEL-LABEL: s_bitselect_i64_pat_1:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX8-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-GISEL-NEXT:    s_xor_b64 s[2:3], s[4:5], s[0:1]
+; GFX8-GISEL-NEXT:    s_and_b64 s[2:3], s[2:3], s[6:7]
+; GFX8-GISEL-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GFX8-GISEL-NEXT:    s_add_u32 s0, s0, 10
+; GFX8-GISEL-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX8-GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX8-GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX8-GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8-GISEL-NEXT:    s_endpgm
+;
+; GFX10-GISEL-LABEL: s_bitselect_i64_pat_1:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_clause 0x1
+; GFX10-GISEL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX10-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_xor_b64 s[2:3], s[4:5], s[0:1]
+; GFX10-GISEL-NEXT:    s_and_b64 s[2:3], s[2:3], s[6:7]
+; GFX10-GISEL-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GFX10-GISEL-NEXT:    s_add_u32 s0, s0, 10
+; GFX10-GISEL-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX10-GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX10-GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX10-GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX10-GISEL-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
+; GFX10-GISEL-NEXT:    s_endpgm
   %xor.0 = xor i64 %a, %mask
   %and = and i64 %xor.0, %b
   %bitselect = xor i64 %and, %mask
@@ -1107,6 +1794,43 @@ define amdgpu_kernel void @s_bitselect_i64_pat_2(i64 %a, i64 %b, i64 %mask) {
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX10-NEXT:    s_endpgm
+;
+; GFX8-GISEL-LABEL: s_bitselect_i64_pat_2:
+; GFX8-GISEL:       ; %bb.0:
+; GFX8-GISEL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX8-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-GISEL-NEXT:    s_xor_b64 s[2:3], s[4:5], s[0:1]
+; GFX8-GISEL-NEXT:    s_and_b64 s[2:3], s[2:3], s[6:7]
+; GFX8-GISEL-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GFX8-GISEL-NEXT:    s_add_u32 s0, s0, 10
+; GFX8-GISEL-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX8-GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX8-GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX8-GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8-GISEL-NEXT:    s_endpgm
+;
+; GFX10-GISEL-LABEL: s_bitselect_i64_pat_2:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_clause 0x1
+; GFX10-GISEL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX10-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_xor_b64 s[2:3], s[4:5], s[0:1]
+; GFX10-GISEL-NEXT:    s_and_b64 s[2:3], s[2:3], s[6:7]
+; GFX10-GISEL-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GFX10-GISEL-NEXT:    s_add_u32 s0, s0, 10
+; GFX10-GISEL-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX10-GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX10-GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX10-GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX10-GISEL-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
+; GFX10-GISEL-NEXT:    s_endpgm
   %xor.0 = xor i64 %a, %mask
   %and = and i64 %xor.0, %b
   %bitselect = xor i64 %and, %mask
@@ -1167,6 +1891,45 @@ define amdgpu_kernel void @s_bfi_sha256_ma_i64(i64 %x, i64 %y, i64 %z) {
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX10-NEXT:    s_endpgm
+;
+; GFX8-GISEL-LABEL: s_bfi_sha256_ma_i64:
+; GFX8-GISEL:       ; %bb.0: ; %entry
+; GFX8-GISEL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX8-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-GISEL-NEXT:    s_and_b64 s[2:3], s[4:5], s[0:1]
+; GFX8-GISEL-NEXT:    s_or_b64 s[0:1], s[4:5], s[0:1]
+; GFX8-GISEL-NEXT:    s_and_b64 s[0:1], s[6:7], s[0:1]
+; GFX8-GISEL-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX8-GISEL-NEXT:    s_add_u32 s0, s0, 10
+; GFX8-GISEL-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX8-GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX8-GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX8-GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; GFX8-GISEL-NEXT:    s_endpgm
+;
+; GFX10-GISEL-LABEL: s_bfi_sha256_ma_i64:
+; GFX10-GISEL:       ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT:    s_clause 0x1
+; GFX10-GISEL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX10-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX10-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-GISEL-NEXT:    s_or_b64 s[2:3], s[4:5], s[0:1]
+; GFX10-GISEL-NEXT:    s_and_b64 s[0:1], s[4:5], s[0:1]
+; GFX10-GISEL-NEXT:    s_and_b64 s[2:3], s[6:7], s[2:3]
+; GFX10-GISEL-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-GISEL-NEXT:    s_add_u32 s0, s0, 10
+; GFX10-GISEL-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX10-GISEL-NEXT:    s_and_b32 s2, s2, 1
+; GFX10-GISEL-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX10-GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX10-GISEL-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
+; GFX10-GISEL-NEXT:    s_endpgm
 entry:
   %and0 = and i64 %x, %z
   %or0 = or i64 %x, %z

More information about the llvm-commits mailing list