[llvm] e91feee - [AMDGPU] Add ISD::FSHR -> ALIGNBIT support

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 12 13:17:25 PDT 2020


Author: Simon Pilgrim
Date: 2020-03-12T20:16:57Z
New Revision: e91feeed21ee16abdb73f6e8cd471a253136e2cf

URL: https://github.com/llvm/llvm-project/commit/e91feeed21ee16abdb73f6e8cd471a253136e2cf
DIFF: https://github.com/llvm/llvm-project/commit/e91feeed21ee16abdb73f6e8cd471a253136e2cf.diff

LOG: [AMDGPU] Add ISD::FSHR -> ALIGNBIT support

This patch allows ISD::FSHR(i32) patterns to be lowered to ALIGNBIT instructions.

This also improves test coverage of ISD::FSHR matching: x86 has both FSHL and FSHR instructions, but we prefer FSHL by default, so the FSHR lowering paths are otherwise exercised less often.

Differential Revision: https://reviews.llvm.org/D76070
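
For reference, V_ALIGNBIT_B32 (and R600's BIT_ALIGN_INT) extracts a 32-bit
window from the 64-bit concatenation of its first two operands, which is
exactly the semantics of ISD::FSHR / llvm.fshr.i32. A minimal C sketch of
the shared semantics (illustrative only, not part of the patch):

    #include <stdint.h>

    /* fshr(hi, lo, sh): shift the 64-bit value hi:lo right by sh (mod 32)
       and keep the low 32 bits -- the window v_alignbit_b32 extracts. */
    static uint32_t fshr32(uint32_t hi, uint32_t lo, uint32_t sh) {
        uint64_t concat = ((uint64_t)hi << 32) | lo;
        return (uint32_t)(concat >> (sh & 31));
    }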

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
    llvm/lib/Target/AMDGPU/EvergreenInstructions.td
    llvm/lib/Target/AMDGPU/SIInstructions.td
    llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
    llvm/test/CodeGen/AMDGPU/fshl.ll
    llvm/test/CodeGen/AMDGPU/fshr.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
    llvm/test/CodeGen/AMDGPU/permute.ll
    llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
    llvm/test/CodeGen/AMDGPU/shift-i128.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 7b659d3c61a3..be02054742ec 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -333,6 +333,9 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::SUBE, VT, Legal);
   }
 
+  // The hardware supports 32-bit FSHR, but not FSHL.
+  setOperationAction(ISD::FSHR, MVT::i32, Legal);
+
   // The hardware supports 32-bit ROTR, but not ROTL.
   setOperationAction(ISD::ROTL, MVT::i32, Expand);
   setOperationAction(ISD::ROTL, MVT::i64, Expand);

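Marking only FSHR as Legal is enough to improve the fshl.ll cases below as
well: for a constant shift amount, a 32-bit fshl can be rewritten as an fshr
by the complementary amount, which is why the fshl-by-7 tests now lower to
v_alignbit_b32 with a shift of 25. Continuing the illustrative fshr32 sketch
above (assumed helper, not part of the patch):

    /* For sh != 0 (mod 32), fshl(hi, lo, sh) == fshr(hi, lo, 32 - sh). */
    static uint32_t fshl32(uint32_t hi, uint32_t lo, uint32_t sh) {
        sh &= 31;
        return sh ? fshr32(hi, lo, 32 - sh) : hi;
    }
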
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 0c395d2fef7c..dedbe15ab029 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -736,6 +736,12 @@ multiclass BFEPattern <Instruction UBFE, Instruction SBFE, Instruction MOV> {
   >;
 }
 
+// fshr pattern
+class FSHRPattern <Instruction BIT_ALIGN> : AMDGPUPat <
+  (fshr i32:$src0, i32:$src1, i32:$src2),
+  (BIT_ALIGN $src0, $src1, $src2)
+>;
+
 // rotr pattern
 class ROTRPattern <Instruction BIT_ALIGN> : AMDGPUPat <
   (rotr i32:$src0, i32:$src1),

diff --git a/llvm/lib/Target/AMDGPU/EvergreenInstructions.td b/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
index 88e554ae0bcc..1b4bbf1d089f 100644
--- a/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
+++ b/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
@@ -422,6 +422,7 @@ def MULADD_UINT24_eg : R600_3OP <0x10, "MULADD_UINT24",
 def : UMad24Pat<MULADD_UINT24_eg>;
 
 def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>;
+def : FSHRPattern <BIT_ALIGN_INT_eg>;
 def : ROTRPattern <BIT_ALIGN_INT_eg>;
 def MULADD_eg : MULADD_Common<0x14>;
 def MULADD_IEEE_eg : MULADD_IEEE_Common<0x18>;

diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index e0cb46ae6a71..49bd9cbfe380 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1484,6 +1484,7 @@ def : UMad24Pat<V_MAD_U32_U24, 1>;
 
 // FIXME: This should only be done for VALU inputs
 defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
+def : FSHRPattern <V_ALIGNBIT_B32>;
 def : ROTRPattern <V_ALIGNBIT_B32>;
 
 def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))),

diff --git a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
index b160b512b863..c510ea7216f3 100644
--- a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
@@ -163,9 +163,8 @@ define void @undef_lo2_v4i16(<2 x i16> %arg0) {
 ; GFX8-LABEL: undef_lo2_v4i16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT:    v_alignbit_b32 v0, v1, v0, 16
 ; GFX8-NEXT:    ;;#ASMSTART
 ; GFX8-NEXT:    ; use v[0:1]
 ; GFX8-NEXT:    ;;#ASMEND
@@ -190,9 +189,8 @@ define void @undef_lo2_v4f16(<2 x half> %arg0) {
 ; GFX8-LABEL: undef_lo2_v4f16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT:    v_alignbit_b32 v0, v1, v0, 16
 ; GFX8-NEXT:    ;;#ASMSTART
 ; GFX8-NEXT:    ; use v[0:1]
 ; GFX8-NEXT:    ;;#ASMEND

diff --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll
index 43b9e5f726a4..0e17df416cd2 100644
--- a/llvm/test/CodeGen/AMDGPU/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshl.ll
@@ -97,10 +97,8 @@ define amdgpu_kernel void @fshl_i32_imm(i32 addrspace(1)* %in, i32 %x, i32 %y) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshr_b32 s1, s1, 25
-; SI-NEXT:    s_lshl_b32 s0, s0, 7
-; SI-NEXT:    s_or_b32 s0, s0, s1
-; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_alignbit_b32 v0, s0, v0, 25
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -109,12 +107,10 @@ define amdgpu_kernel void @fshl_i32_imm(i32 addrspace(1)* %in, i32 %x, i32 %y) {
 ; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x2c
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_alignbit_b32 v2, s0, v0, 25
 ; VI-NEXT:    v_mov_b32_e32 v0, s2
-; VI-NEXT:    s_lshr_b32 s1, s1, 25
-; VI-NEXT:    s_lshl_b32 s0, s0, 7
-; VI-NEXT:    s_or_b32 s0, s0, s1
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_mov_b32_e32 v2, s0
 ; VI-NEXT:    flat_store_dword v[0:1], v2
 ; VI-NEXT:    s_endpgm
 ;
@@ -123,28 +119,24 @@ define amdgpu_kernel void @fshl_i32_imm(i32 addrspace(1)* %in, i32 %x, i32 %y) {
 ; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x2c
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s1
+; GFX9-NEXT:    v_alignbit_b32 v2, s0, v0, 25
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    s_lshr_b32 s1, s1, 25
-; GFX9-NEXT:    s_lshl_b32 s0, s0, 7
-; GFX9-NEXT:    s_or_b32 s0, s0, s1
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX9-NEXT:    global_store_dword v[0:1], v2, off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; R600-LABEL: fshl_i32_imm:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 5, @4, KC0[CB0:0-32], KC1[]
-; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; R600-NEXT:    ALU 3, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     LSHL T0.W, KC0[2].Z, literal.x,
-; R600-NEXT:     LSHR * T1.W, KC0[2].W, literal.y,
-; R600-NEXT:    7(9.809089e-45), 25(3.503246e-44)
-; R600-NEXT:     OR_INT T0.X, PV.W, PS,
-; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; R600-NEXT:     LSHR * T0.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T1.X, KC0[2].Z, KC0[2].W, literal.x,
+; R600-NEXT:    25(3.503246e-44), 0(0.000000e+00)
 entry:
   %0 = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 7)
   store i32 %0, i32 addrspace(1)* %in
@@ -283,14 +275,10 @@ define amdgpu_kernel void @fshl_v2i32_imm(<2 x i32> addrspace(1)* %in, <2 x i32>
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b32 s3, s3, 9
-; SI-NEXT:    s_lshr_b32 s1, s1, 23
-; SI-NEXT:    s_lshr_b32 s0, s0, 25
-; SI-NEXT:    s_lshl_b32 s2, s2, 7
-; SI-NEXT:    s_or_b32 s1, s3, s1
-; SI-NEXT:    s_or_b32 s0, s2, s0
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_alignbit_b32 v1, s3, v0, 23
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_alignbit_b32 v0, s2, v0, 25
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -300,15 +288,11 @@ define amdgpu_kernel void @fshl_v2i32_imm(<2 x i32> addrspace(1)* %in, <2 x i32>
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_alignbit_b32 v1, s5, v0, 23
+; VI-NEXT:    v_alignbit_b32 v0, s4, v2, 25
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
-; VI-NEXT:    s_lshl_b32 s5, s5, 9
-; VI-NEXT:    s_lshr_b32 s1, s1, 23
-; VI-NEXT:    s_lshr_b32 s0, s0, 25
-; VI-NEXT:    s_lshl_b32 s4, s4, 7
-; VI-NEXT:    s_or_b32 s1, s5, s1
-; VI-NEXT:    s_or_b32 s0, s4, s0
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
@@ -319,34 +303,26 @@ define amdgpu_kernel void @fshl_v2i32_imm(<2 x i32> addrspace(1)* %in, <2 x i32>
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s0
+; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, 23
+; GFX9-NEXT:    v_alignbit_b32 v0, s4, v2, 25
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s2
-; GFX9-NEXT:    s_lshl_b32 s5, s5, 9
-; GFX9-NEXT:    s_lshr_b32 s1, s1, 23
-; GFX9-NEXT:    s_lshr_b32 s0, s0, 25
-; GFX9-NEXT:    s_lshl_b32 s4, s4, 7
-; GFX9-NEXT:    s_or_b32 s1, s5, s1
-; GFX9-NEXT:    s_or_b32 s0, s4, s0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX9-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; R600-LABEL: fshl_v2i32_imm:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 9, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 5, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     LSHL T0.W, KC0[3].X, literal.x,
-; R600-NEXT:     LSHR * T1.W, KC0[3].Z, literal.y,
-; R600-NEXT:    9(1.261169e-44), 23(3.222986e-44)
-; R600-NEXT:     OR_INT T0.Y, PV.W, PS,
-; R600-NEXT:     LSHL T0.W, KC0[2].W, literal.x,
-; R600-NEXT:     LSHR * T1.W, KC0[3].Y, literal.y,
-; R600-NEXT:    7(9.809089e-45), 25(3.503246e-44)
-; R600-NEXT:     OR_INT T0.X, PV.W, PS,
+; R600-NEXT:     BIT_ALIGN_INT * T0.Y, KC0[3].X, KC0[3].Z, literal.x,
+; R600-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T0.X, KC0[2].W, KC0[3].Y, literal.x,
+; R600-NEXT:    25(3.503246e-44), 0(0.000000e+00)
 ; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
 entry:
@@ -557,22 +533,14 @@ define amdgpu_kernel void @fshl_v4i32_imm(<4 x i32> addrspace(1)* %in, <4 x i32>
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b32 s11, s11, 1
-; SI-NEXT:    s_lshr_b32 s3, s3, 31
-; SI-NEXT:    s_lshr_b32 s2, s2, 23
-; SI-NEXT:    s_lshl_b32 s10, s10, 9
-; SI-NEXT:    s_lshr_b32 s1, s1, 25
-; SI-NEXT:    s_lshl_b32 s9, s9, 7
-; SI-NEXT:    s_lshr_b32 s0, s0, 31
-; SI-NEXT:    s_lshl_b32 s8, s8, 1
-; SI-NEXT:    s_or_b32 s3, s11, s3
-; SI-NEXT:    s_or_b32 s2, s10, s2
-; SI-NEXT:    s_or_b32 s1, s9, s1
-; SI-NEXT:    s_or_b32 s0, s8, s0
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_alignbit_b32 v3, s11, v0, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_alignbit_b32 v2, s10, v0, 23
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_alignbit_b32 v1, s9, v0, 25
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mov_b32_e32 v2, s2
-; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    v_alignbit_b32 v0, s8, v0, 31
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -583,23 +551,15 @@ define amdgpu_kernel void @fshl_v4i32_imm(<4 x i32> addrspace(1)* %in, <4 x i32>
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x44
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    s_lshl_b32 s7, s7, 1
-; VI-NEXT:    s_lshr_b32 s3, s3, 31
-; VI-NEXT:    s_lshr_b32 s2, s2, 23
-; VI-NEXT:    s_lshl_b32 s6, s6, 9
-; VI-NEXT:    s_lshr_b32 s1, s1, 25
-; VI-NEXT:    s_lshl_b32 s5, s5, 7
-; VI-NEXT:    s_lshr_b32 s0, s0, 31
-; VI-NEXT:    s_lshl_b32 s4, s4, 1
-; VI-NEXT:    s_or_b32 s3, s7, s3
-; VI-NEXT:    s_or_b32 s2, s6, s2
-; VI-NEXT:    s_or_b32 s1, s5, s1
-; VI-NEXT:    s_or_b32 s0, s4, s0
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mov_b32_e32 v2, s2
-; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    v_mov_b32_e32 v5, s9
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_alignbit_b32 v3, s7, v0, 31
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_alignbit_b32 v2, s6, v1, 23
+; VI-NEXT:    v_alignbit_b32 v1, s5, v0, 25
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_alignbit_b32 v0, s4, v0, 31
 ; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT:    s_endpgm
 ;
@@ -610,49 +570,33 @@ define amdgpu_kernel void @fshl_v4i32_imm(<4 x i32> addrspace(1)* %in, <4 x i32>
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x44
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s8
-; GFX9-NEXT:    s_lshl_b32 s7, s7, 1
-; GFX9-NEXT:    s_lshr_b32 s3, s3, 31
-; GFX9-NEXT:    s_lshr_b32 s2, s2, 23
-; GFX9-NEXT:    s_lshl_b32 s6, s6, 9
-; GFX9-NEXT:    s_lshr_b32 s1, s1, 25
-; GFX9-NEXT:    s_lshl_b32 s5, s5, 7
-; GFX9-NEXT:    s_lshr_b32 s0, s0, 31
-; GFX9-NEXT:    s_lshl_b32 s4, s4, 1
-; GFX9-NEXT:    s_or_b32 s3, s7, s3
-; GFX9-NEXT:    s_or_b32 s2, s6, s2
-; GFX9-NEXT:    s_or_b32 s1, s5, s1
-; GFX9-NEXT:    s_or_b32 s0, s4, s0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    v_mov_b32_e32 v2, s2
-; GFX9-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s9
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_alignbit_b32 v3, s7, v0, 31
+; GFX9-NEXT:    v_mov_b32_e32 v0, s1
+; GFX9-NEXT:    v_alignbit_b32 v2, s6, v1, 23
+; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, 25
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_alignbit_b32 v0, s4, v0, 31
 ; GFX9-NEXT:    global_store_dwordx4 v[4:5], v[0:3], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; R600-LABEL: fshl_v4i32_imm:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 17, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 9, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     LSHL T0.W, KC0[4].X, 1,
-; R600-NEXT:     LSHR * T1.W, KC0[5].X, literal.x,
+; R600-NEXT:     BIT_ALIGN_INT * T0.W, KC0[4].X, KC0[5].X, literal.x,
 ; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT:     LSHL T0.Z, KC0[3].W, literal.x,
-; R600-NEXT:     LSHR T2.W, KC0[4].W, literal.y,
-; R600-NEXT:     OR_INT * T0.W, PV.W, PS,
-; R600-NEXT:    9(1.261169e-44), 23(3.222986e-44)
-; R600-NEXT:     OR_INT T0.Z, PV.Z, PV.W,
-; R600-NEXT:     LSHL T1.W, KC0[3].Z, literal.x,
-; R600-NEXT:     LSHR * T2.W, KC0[4].Z, literal.y,
-; R600-NEXT:    7(9.809089e-45), 25(3.503246e-44)
-; R600-NEXT:     OR_INT T0.Y, PV.W, PS,
-; R600-NEXT:     LSHL T1.W, KC0[3].Y, 1,
-; R600-NEXT:     LSHR * T2.W, KC0[4].Y, literal.x,
+; R600-NEXT:     BIT_ALIGN_INT * T0.Z, KC0[3].W, KC0[4].W, literal.x,
+; R600-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T0.Y, KC0[3].Z, KC0[4].Z, literal.x,
+; R600-NEXT:    25(3.503246e-44), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T0.X, KC0[3].Y, KC0[4].Y, literal.x,
 ; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT:     OR_INT T0.X, PV.W, PS,
 ; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/fshr.ll b/llvm/test/CodeGen/AMDGPU/fshr.ll
index 5ee8e2eeb189..f90863b26d5a 100644
--- a/llvm/test/CodeGen/AMDGPU/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshr.ll
@@ -16,15 +16,9 @@ define amdgpu_kernel void @fshr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_and_b32 s2, s2, 31
-; SI-NEXT:    s_sub_i32 s8, 32, s2
-; SI-NEXT:    s_lshr_b32 s3, s1, s2
-; SI-NEXT:    s_lshl_b32 s0, s0, s8
-; SI-NEXT:    s_or_b32 s0, s0, s3
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_mov_b32_e32 v1, s2
+; SI-NEXT:    v_alignbit_b32 v0, s0, v0, v1
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -33,15 +27,9 @@ define amdgpu_kernel void @fshr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_and_b32 s2, s2, 31
-; VI-NEXT:    s_sub_i32 s3, 32, s2
 ; VI-NEXT:    v_mov_b32_e32 v0, s1
-; VI-NEXT:    s_lshr_b32 s1, s1, s2
-; VI-NEXT:    s_lshl_b32 s0, s0, s3
-; VI-NEXT:    s_or_b32 s0, s0, s1
-; VI-NEXT:    v_mov_b32_e32 v1, s0
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
-; VI-NEXT:    v_cndmask_b32_e32 v2, v1, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_alignbit_b32 v2, s0, v0, v1
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    flat_store_dword v[0:1], v2
@@ -52,15 +40,9 @@ define amdgpu_kernel void @fshr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_and_b32 s2, s2, 31
-; GFX9-NEXT:    s_sub_i32 s3, 32, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
-; GFX9-NEXT:    s_lshr_b32 s1, s1, s2
-; GFX9-NEXT:    s_lshl_b32 s0, s0, s3
-; GFX9-NEXT:    s_or_b32 s0, s0, s1
-; GFX9-NEXT:    v_mov_b32_e32 v1, s0
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v1, v0, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_alignbit_b32 v2, s0, v0, v1
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX9-NEXT:    global_store_dword v[0:1], v2, off
@@ -68,21 +50,14 @@ define amdgpu_kernel void @fshr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ;
 ; R600-LABEL: fshr_i32:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 9, @4, KC0[CB0:0-32], KC1[]
-; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; R600-NEXT:    ALU 2, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     AND_INT * T0.W, KC0[3].X, literal.x,
-; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT:     SUB_INT * T1.W, literal.x, PV.W,
-; R600-NEXT:    32(4.484155e-44), 0(0.000000e+00)
-; R600-NEXT:     LSHL T1.W, KC0[2].Z, PV.W,
-; R600-NEXT:     LSHR * T2.W, KC0[2].W, T0.W,
-; R600-NEXT:     OR_INT * T1.W, PV.W, PS,
-; R600-NEXT:     CNDE_INT T0.X, T0.W, KC0[2].W, PV.W,
-; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; R600-NEXT:     LSHR * T0.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T1.X, KC0[2].Z, KC0[2].W, KC0[3].X,
 entry:
   %0 = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %z)
   store i32 %0, i32 addrspace(1)* %in
@@ -97,10 +72,8 @@ define amdgpu_kernel void @fshr_i32_imm(i32 addrspace(1)* %in, i32 %x, i32 %y) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshr_b32 s1, s1, 7
-; SI-NEXT:    s_lshl_b32 s0, s0, 25
-; SI-NEXT:    s_or_b32 s0, s0, s1
-; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_alignbit_b32 v0, s0, v0, 7
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -109,12 +82,10 @@ define amdgpu_kernel void @fshr_i32_imm(i32 addrspace(1)* %in, i32 %x, i32 %y) {
 ; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x2c
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_alignbit_b32 v2, s0, v0, 7
 ; VI-NEXT:    v_mov_b32_e32 v0, s2
-; VI-NEXT:    s_lshr_b32 s1, s1, 7
-; VI-NEXT:    s_lshl_b32 s0, s0, 25
-; VI-NEXT:    s_or_b32 s0, s0, s1
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_mov_b32_e32 v2, s0
 ; VI-NEXT:    flat_store_dword v[0:1], v2
 ; VI-NEXT:    s_endpgm
 ;
@@ -123,28 +94,24 @@ define amdgpu_kernel void @fshr_i32_imm(i32 addrspace(1)* %in, i32 %x, i32 %y) {
 ; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x2c
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s1
+; GFX9-NEXT:    v_alignbit_b32 v2, s0, v0, 7
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    s_lshr_b32 s1, s1, 7
-; GFX9-NEXT:    s_lshl_b32 s0, s0, 25
-; GFX9-NEXT:    s_or_b32 s0, s0, s1
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX9-NEXT:    global_store_dword v[0:1], v2, off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; R600-LABEL: fshr_i32_imm:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 5, @4, KC0[CB0:0-32], KC1[]
-; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; R600-NEXT:    ALU 3, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     LSHL T0.W, KC0[2].Z, literal.x,
-; R600-NEXT:     LSHR * T1.W, KC0[2].W, literal.y,
-; R600-NEXT:    25(3.503246e-44), 7(9.809089e-45)
-; R600-NEXT:     OR_INT T0.X, PV.W, PS,
-; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; R600-NEXT:     LSHR * T0.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T1.X, KC0[2].Z, KC0[2].W, literal.x,
+; R600-NEXT:    7(9.809089e-45), 0(0.000000e+00)
 entry:
   %0 = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 7)
   store i32 %0, i32 addrspace(1)* %in
@@ -283,14 +250,10 @@ define amdgpu_kernel void @fshr_v2i32_imm(<2 x i32> addrspace(1)* %in, <2 x i32>
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b32 s3, s3, 23
-; SI-NEXT:    s_lshr_b32 s1, s1, 9
-; SI-NEXT:    s_lshr_b32 s0, s0, 7
-; SI-NEXT:    s_lshl_b32 s2, s2, 25
-; SI-NEXT:    s_or_b32 s1, s3, s1
-; SI-NEXT:    s_or_b32 s0, s2, s0
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_alignbit_b32 v1, s3, v0, 9
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_alignbit_b32 v0, s2, v0, 7
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -300,15 +263,11 @@ define amdgpu_kernel void @fshr_v2i32_imm(<2 x i32> addrspace(1)* %in, <2 x i32>
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_alignbit_b32 v1, s5, v0, 9
+; VI-NEXT:    v_alignbit_b32 v0, s4, v2, 7
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
-; VI-NEXT:    s_lshl_b32 s5, s5, 23
-; VI-NEXT:    s_lshr_b32 s1, s1, 9
-; VI-NEXT:    s_lshr_b32 s0, s0, 7
-; VI-NEXT:    s_lshl_b32 s4, s4, 25
-; VI-NEXT:    s_or_b32 s1, s5, s1
-; VI-NEXT:    s_or_b32 s0, s4, s0
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
@@ -319,34 +278,26 @@ define amdgpu_kernel void @fshr_v2i32_imm(<2 x i32> addrspace(1)* %in, <2 x i32>
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s0
+; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, 9
+; GFX9-NEXT:    v_alignbit_b32 v0, s4, v2, 7
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s2
-; GFX9-NEXT:    s_lshl_b32 s5, s5, 23
-; GFX9-NEXT:    s_lshr_b32 s1, s1, 9
-; GFX9-NEXT:    s_lshr_b32 s0, s0, 7
-; GFX9-NEXT:    s_lshl_b32 s4, s4, 25
-; GFX9-NEXT:    s_or_b32 s1, s5, s1
-; GFX9-NEXT:    s_or_b32 s0, s4, s0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX9-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; R600-LABEL: fshr_v2i32_imm:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 9, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 5, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     LSHL T0.W, KC0[3].X, literal.x,
-; R600-NEXT:     LSHR * T1.W, KC0[3].Z, literal.y,
-; R600-NEXT:    23(3.222986e-44), 9(1.261169e-44)
-; R600-NEXT:     OR_INT T0.Y, PV.W, PS,
-; R600-NEXT:     LSHL T0.W, KC0[2].W, literal.x,
-; R600-NEXT:     LSHR * T1.W, KC0[3].Y, literal.y,
-; R600-NEXT:    25(3.503246e-44), 7(9.809089e-45)
-; R600-NEXT:     OR_INT T0.X, PV.W, PS,
+; R600-NEXT:     BIT_ALIGN_INT * T0.Y, KC0[3].X, KC0[3].Z, literal.x,
+; R600-NEXT:    9(1.261169e-44), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T0.X, KC0[2].W, KC0[3].Y, literal.x,
+; R600-NEXT:    7(9.809089e-45), 0(0.000000e+00)
 ; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
 entry:
@@ -557,22 +508,14 @@ define amdgpu_kernel void @fshr_v4i32_imm(<4 x i32> addrspace(1)* %in, <4 x i32>
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshl_b32 s11, s11, 31
-; SI-NEXT:    s_lshr_b32 s3, s3, 1
-; SI-NEXT:    s_lshr_b32 s2, s2, 9
-; SI-NEXT:    s_lshl_b32 s10, s10, 23
-; SI-NEXT:    s_lshr_b32 s1, s1, 7
-; SI-NEXT:    s_lshl_b32 s9, s9, 25
-; SI-NEXT:    s_lshr_b32 s0, s0, 1
-; SI-NEXT:    s_lshl_b32 s8, s8, 31
-; SI-NEXT:    s_or_b32 s3, s11, s3
-; SI-NEXT:    s_or_b32 s2, s10, s2
-; SI-NEXT:    s_or_b32 s1, s9, s1
-; SI-NEXT:    s_or_b32 s0, s8, s0
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_alignbit_b32 v3, s11, v0, 1
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_alignbit_b32 v2, s10, v0, 9
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_alignbit_b32 v1, s9, v0, 7
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mov_b32_e32 v2, s2
-; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    v_alignbit_b32 v0, s8, v0, 1
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -583,23 +526,15 @@ define amdgpu_kernel void @fshr_v4i32_imm(<4 x i32> addrspace(1)* %in, <4 x i32>
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x44
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    s_lshl_b32 s7, s7, 31
-; VI-NEXT:    s_lshr_b32 s3, s3, 1
-; VI-NEXT:    s_lshr_b32 s2, s2, 9
-; VI-NEXT:    s_lshl_b32 s6, s6, 23
-; VI-NEXT:    s_lshr_b32 s1, s1, 7
-; VI-NEXT:    s_lshl_b32 s5, s5, 25
-; VI-NEXT:    s_lshr_b32 s0, s0, 1
-; VI-NEXT:    s_lshl_b32 s4, s4, 31
-; VI-NEXT:    s_or_b32 s3, s7, s3
-; VI-NEXT:    s_or_b32 s2, s6, s2
-; VI-NEXT:    s_or_b32 s1, s5, s1
-; VI-NEXT:    s_or_b32 s0, s4, s0
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mov_b32_e32 v2, s2
-; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    v_mov_b32_e32 v5, s9
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_alignbit_b32 v3, s7, v0, 1
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_alignbit_b32 v2, s6, v1, 9
+; VI-NEXT:    v_alignbit_b32 v1, s5, v0, 7
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_alignbit_b32 v0, s4, v0, 1
 ; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT:    s_endpgm
 ;
@@ -610,49 +545,31 @@ define amdgpu_kernel void @fshr_v4i32_imm(<4 x i32> addrspace(1)* %in, <4 x i32>
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x44
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s8
-; GFX9-NEXT:    s_lshl_b32 s7, s7, 31
-; GFX9-NEXT:    s_lshr_b32 s3, s3, 1
-; GFX9-NEXT:    s_lshr_b32 s2, s2, 9
-; GFX9-NEXT:    s_lshl_b32 s6, s6, 23
-; GFX9-NEXT:    s_lshr_b32 s1, s1, 7
-; GFX9-NEXT:    s_lshl_b32 s5, s5, 25
-; GFX9-NEXT:    s_lshr_b32 s0, s0, 1
-; GFX9-NEXT:    s_lshl_b32 s4, s4, 31
-; GFX9-NEXT:    s_or_b32 s3, s7, s3
-; GFX9-NEXT:    s_or_b32 s2, s6, s2
-; GFX9-NEXT:    s_or_b32 s1, s5, s1
-; GFX9-NEXT:    s_or_b32 s0, s4, s0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    v_mov_b32_e32 v2, s2
-; GFX9-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s9
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_alignbit_b32 v3, s7, v0, 1
+; GFX9-NEXT:    v_mov_b32_e32 v0, s1
+; GFX9-NEXT:    v_alignbit_b32 v2, s6, v1, 9
+; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, 7
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_alignbit_b32 v0, s4, v0, 1
 ; GFX9-NEXT:    global_store_dwordx4 v[4:5], v[0:3], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; R600-LABEL: fshr_v4i32_imm:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 17, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 7, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     LSHL T0.W, KC0[4].X, literal.x,
-; R600-NEXT:     LSHR * T1.W, KC0[5].X, 1,
-; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT:     LSHL T0.Z, KC0[3].W, literal.x,
-; R600-NEXT:     LSHR T2.W, KC0[4].W, literal.y,
-; R600-NEXT:     OR_INT * T0.W, PV.W, PS,
-; R600-NEXT:    23(3.222986e-44), 9(1.261169e-44)
-; R600-NEXT:     OR_INT T0.Z, PV.Z, PV.W,
-; R600-NEXT:     LSHL T1.W, KC0[3].Z, literal.x,
-; R600-NEXT:     LSHR * T2.W, KC0[4].Z, literal.y,
-; R600-NEXT:    25(3.503246e-44), 7(9.809089e-45)
-; R600-NEXT:     OR_INT T0.Y, PV.W, PS,
-; R600-NEXT:     LSHL T1.W, KC0[3].Y, literal.x,
-; R600-NEXT:     LSHR * T2.W, KC0[4].Y, 1,
-; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT:     OR_INT T0.X, PV.W, PS,
+; R600-NEXT:     BIT_ALIGN_INT * T0.W, KC0[4].X, KC0[5].X, 1,
+; R600-NEXT:     BIT_ALIGN_INT * T0.Z, KC0[3].W, KC0[4].W, literal.x,
+; R600-NEXT:    9(1.261169e-44), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T0.Y, KC0[3].Z, KC0[4].Z, literal.x,
+; R600-NEXT:    7(9.809089e-45), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T0.X, KC0[3].Y, KC0[4].Y, 1,
 ; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
index 341b3942aa36..703e23c513e0 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
@@ -329,13 +329,12 @@ define amdgpu_kernel void @s_insertelement_v2i16_0_reghi_both_multi_use_1(<2 x i
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_mov_b32_e32 v0, s0
 ; CI-NEXT:    s_load_dword s0, s[2:3], 0x0
+; CI-NEXT:    v_mov_b32_e32 v2, s4
 ; CI-NEXT:    v_mov_b32_e32 v1, s1
 ; CI-NEXT:    s_lshr_b32 s1, s4, 16
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_lshr_b32 s0, s0, 16
-; CI-NEXT:    s_lshl_b32 s2, s0, 16
-; CI-NEXT:    s_or_b32 s2, s1, s2
-; CI-NEXT:    v_mov_b32_e32 v2, s2
+; CI-NEXT:    v_alignbit_b32 v2, s0, v2, 16
 ; CI-NEXT:    flat_store_dword v[0:1], v2
 ; CI-NEXT:    ;;#ASMSTART
 ; CI-NEXT:    ; use s1

diff --git a/llvm/test/CodeGen/AMDGPU/permute.ll b/llvm/test/CodeGen/AMDGPU/permute.ll
index 795b466b4d64..d8c20acc666b 100644
--- a/llvm/test/CodeGen/AMDGPU/permute.ll
+++ b/llvm/test/CodeGen/AMDGPU/permute.ll
@@ -62,8 +62,7 @@ bb:
 }
 
 ; GCN-LABEL: {{^}}lsh8_or_lsr24:
-; GCN: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x6050403
-; GCN: v_perm_b32 v{{[0-9]+}}, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[MASK]]
+; GCN: v_alignbit_b32 v{{[0-9]+}}, {{[vs][0-9]+}}, {{[vs][0-9]+}}, 24
 define amdgpu_kernel void @lsh8_or_lsr24(i32 addrspace(1)* nocapture %arg, i32 %arg1) {
 bb:
   %id = tail call i32 @llvm.amdgcn.workitem.id.x()

diff --git a/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
index 07187a849c5f..d68f5212b6b8 100644
--- a/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
+++ b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
@@ -16,9 +16,8 @@ define amdgpu_kernel void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out,
 ; SI-NEXT:    s_mov_b32 s9, s3
 ; SI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; SI-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT:    v_alignbit_b32 v0, v1, v0, 16
 ; SI-NEXT:    s_mov_b32 s4, s0
 ; SI-NEXT:    s_mov_b32 s5, s1
 ; SI-NEXT:    v_mov_b32_e32 v1, v0
@@ -39,9 +38,8 @@ define amdgpu_kernel void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out,
 ; VI-NEXT:    s_mov_b32 s7, s3
 ; VI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; VI-NEXT:    v_or_b32_e32 v0, v0, v1
+; VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; VI-NEXT:    v_alignbit_b32 v0, v1, v0, 16
 ; VI-NEXT:    v_mov_b32_e32 v1, v0
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
@@ -65,9 +63,8 @@ define amdgpu_kernel void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out,
 ; SI-NEXT:    s_mov_b32 s9, s3
 ; SI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; SI-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT:    v_alignbit_b32 v0, v1, v0, 16
 ; SI-NEXT:    s_mov_b32 s4, s0
 ; SI-NEXT:    s_mov_b32 s5, s1
 ; SI-NEXT:    v_mov_b32_e32 v1, v0
@@ -88,9 +85,8 @@ define amdgpu_kernel void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out,
 ; VI-NEXT:    s_mov_b32 s7, s3
 ; VI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; VI-NEXT:    v_or_b32_e32 v0, v0, v1
+; VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; VI-NEXT:    v_alignbit_b32 v0, v1, v0, 16
 ; VI-NEXT:    v_mov_b32_e32 v1, v0
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/shift-i128.ll b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
index b46e6868923c..72bab6964434 100644
--- a/llvm/test/CodeGen/AMDGPU/shift-i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
@@ -83,9 +83,7 @@ define i128 @v_shl_i128_vk(i128 %lhs) {
 ; GCN-LABEL: v_shl_i128_vk:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_lshrrev_b32_e32 v4, 15, v1
-; GCN-NEXT:    v_lshlrev_b32_e32 v5, 17, v2
-; GCN-NEXT:    v_or_b32_e32 v4, v5, v4
+; GCN-NEXT:    v_alignbit_b32 v4, v2, v1, 15
 ; GCN-NEXT:    v_alignbit_b32 v1, v1, v0, 15
 ; GCN-NEXT:    v_alignbit_b32 v3, v3, v2, 15
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 17, v0
@@ -113,9 +111,7 @@ define i128 @v_ashr_i128_vk(i128 %lhs) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_ashr_i64 v[4:5], v[2:3], 33
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 31, v2
-; GCN-NEXT:    v_lshrrev_b32_e32 v1, 1, v1
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
+; GCN-NEXT:    v_alignbit_b32 v0, v2, v1, 1
 ; GCN-NEXT:    v_alignbit_b32 v1, v3, v2, 1
 ; GCN-NEXT:    v_mov_b32_e32 v2, v4
 ; GCN-NEXT:    v_mov_b32_e32 v3, v5


        

