[llvm] 7a4e26a - [SelectionDAG] Fix miscompile bug in expandFunnelShift

Bjorn Pettersson via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 24 00:53:06 PDT 2020


Author: Bjorn Pettersson
Date: 2020-08-24T09:52:11+02:00
New Revision: 7a4e26adc8c2c00985fb460aba36a0884d412833

URL: https://github.com/llvm/llvm-project/commit/7a4e26adc8c2c00985fb460aba36a0884d412833
DIFF: https://github.com/llvm/llvm-project/commit/7a4e26adc8c2c00985fb460aba36a0884d412833.diff

LOG: [SelectionDAG] Fix miscompile bug in expandFunnelShift

This is a fixup of commit 0819a6416fd217 (D77152), which could
result in miscompiles. The miscompile could only happen for targets
where isOperationLegalOrCustom returns different values for
FSHL and FSHR.

The commit mentioned above added logic in expandFunnelShift to
convert between FSHL and FSHR by swapping the direction of the
funnel shift. However, that transform is only legal if we know
that the shift count (modulo the bitwidth) isn't zero.

Basically, since fshr(-1,0,0)==0 and fshl(-1,0,0)==-1, a rewrite
such as fshr(X,Y,Z) => fshl(X,Y,0-Z) is incorrect whenever
Z modulo the bitwidth could be zero.

```
$ ./alive-tv /tmp/test.ll

----------------------------------------
define i32 @src(i32 %x, i32 %y, i32 %z) {
%0:
  %t0 = fshl i32 %x, i32 %y, i32 %z
  ret i32 %t0
}
=>
define i32 @tgt(i32 %x, i32 %y, i32 %z) {
%0:
  %t0 = sub i32 32, %z
  %t1 = fshr i32 %x, i32 %y, i32 %t0
  ret i32 %t1
}
Transformation doesn't verify!
ERROR: Value mismatch

Example:
i32 %x = #x00000000 (0)
i32 %y = #x00000400 (1024)
i32 %z = #x00000000 (0)

Source:
i32 %t0 = #x00000000 (0)

Target:
i32 %t0 = #x00000020 (32)
i32 %t1 = #x00000400 (1024)
Source value: #x00000000 (0)
Target value: #x00000400 (1024)
```
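
For reference, the same mismatch can be reproduced with a plain C++
model of the generic funnel-shift semantics (a minimal sketch, not
LLVM code; the fshl32/fshr32 helpers are made up for illustration):

```
// Reference semantics for 32-bit funnel shifts:
//   fshl(X, Y, Z) = high 32 bits of ((X:Y) << (Z % 32))
//   fshr(X, Y, Z) = low  32 bits of ((X:Y) >> (Z % 32))
#include <cassert>
#include <cstdint>

static uint32_t fshl32(uint32_t X, uint32_t Y, uint32_t Z) {
  unsigned C = Z % 32;
  return C == 0 ? X : (X << C) | (Y >> (32 - C));
}

static uint32_t fshr32(uint32_t X, uint32_t Y, uint32_t Z) {
  unsigned C = Z % 32;
  return C == 0 ? Y : (X << (32 - C)) | (Y >> C);
}

int main() {
  uint32_t X = 0, Y = 1024, Z = 0;
  assert(fshl32(X, Y, Z) == 0);         // source value: 0
  assert(fshr32(X, Y, 32 - Z) == 1024); // rewritten target value: 1024
}
```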

The transform could be added back, given that logic is added to
prove that (Z % BW) can't be zero. Since there were no test cases
showing that such a transform actually would be useful, I decided
to simply remove the faulty code in this patch.
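
A sketch of what such a guarded variant might look like (untested,
an assumption on my part, reusing the names visible in the removed
hunk together with the isNonZeroModBitWidthOrUndef helper from this
patch):

```
// Hypothetical: only swap the funnel-shift direction when the shift
// amount is known to be nonzero modulo the bitwidth (or undef), since
// fshl X, Y, Z <=> fshr X, Y, -Z only holds when Z % BW != 0.
unsigned RevOpcode = IsFSHL ? ISD::FSHR : ISD::FSHL;
if (!isOperationLegalOrCustom(Node->getOpcode(), VT) &&
    isOperationLegalOrCustom(RevOpcode, VT) &&
    isNonZeroModBitWidthOrUndef(Z, BW)) {
  SDValue Zero = DAG.getConstant(0, DL, ShVT);
  SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Z);
  Result = DAG.getNode(RevOpcode, DL, VT, X, Y, Sub);
  return true;
}
```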

Reviewed By: foad, lebedev.ri

Differential Revision: https://reviews.llvm.org/D86430

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/test/CodeGen/AMDGPU/fshl.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index d7d17bb08e64..2609ba72662b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6140,7 +6140,7 @@ bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
 }
 
 // Check that (every element of) Z is undef or not an exact multiple of BW.
-static bool isNonZeroModBitWidth(SDValue Z, unsigned BW) {
+static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) {
   return ISD::matchUnaryPredicate(
       Z,
       [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; },
@@ -6167,21 +6167,9 @@ bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result,
 
   EVT ShVT = Z.getValueType();
 
-  assert(isPowerOf2_32(BW) && "Expecting the type bitwidth to be a power of 2");
-
-  // If a funnel shift in the other direction is more supported, use it.
-  unsigned RevOpcode = IsFSHL ? ISD::FSHR : ISD::FSHL;
-  if (!isOperationLegalOrCustom(Node->getOpcode(), VT) &&
-      isOperationLegalOrCustom(RevOpcode, VT)) {
-    SDValue Zero = DAG.getConstant(0, DL, ShVT);
-    SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Z);
-    Result = DAG.getNode(RevOpcode, DL, VT, X, Y, Sub);
-    return true;
-  }
-
   SDValue ShX, ShY;
   SDValue ShAmt, InvShAmt;
-  if (isNonZeroModBitWidth(Z, BW)) {
+  if (isNonZeroModBitWidthOrUndef(Z, BW)) {
     // fshl: X << C | Y >> (BW - C)
     // fshr: X << (BW - C) | Y >> C
     // where C = Z % BW is not zero

diff  --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll
index a20c047d556d..489a3c797366 100644
--- a/llvm/test/CodeGen/AMDGPU/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshl.ll
@@ -16,10 +16,13 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_sub_i32 s2, 0, s2
-; SI-NEXT:    v_mov_b32_e32 v0, s1
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    v_alignbit_b32 v0, s0, v0, v1
+; SI-NEXT:    s_andn2_b32 s3, 31, s2
+; SI-NEXT:    s_lshr_b32 s1, s1, 1
+; SI-NEXT:    s_and_b32 s2, s2, 31
+; SI-NEXT:    s_lshr_b32 s1, s1, s3
+; SI-NEXT:    s_lshl_b32 s0, s0, s2
+; SI-NEXT:    s_or_b32 s0, s0, s1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -28,12 +31,15 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_sub_i32 s2, 0, s2
-; VI-NEXT:    v_mov_b32_e32 v0, s1
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    v_alignbit_b32 v2, s0, v0, v1
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    s_andn2_b32 s3, 31, s2
+; VI-NEXT:    s_lshr_b32 s1, s1, 1
+; VI-NEXT:    s_and_b32 s2, s2, 31
+; VI-NEXT:    s_lshr_b32 s1, s1, s3
+; VI-NEXT:    s_lshl_b32 s0, s0, s2
+; VI-NEXT:    s_or_b32 s0, s0, s1
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s0
 ; VI-NEXT:    flat_store_dword v[0:1], v2
 ; VI-NEXT:    s_endpgm
 ;
@@ -42,24 +48,33 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_sub_i32 s2, 0, s2
-; GFX9-NEXT:    v_mov_b32_e32 v0, s1
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_alignbit_b32 v2, s0, v0, v1
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    s_andn2_b32 s3, 31, s2
+; GFX9-NEXT:    s_lshr_b32 s1, s1, 1
+; GFX9-NEXT:    s_and_b32 s2, s2, 31
+; GFX9-NEXT:    s_lshr_b32 s1, s1, s3
+; GFX9-NEXT:    s_lshl_b32 s0, s0, s2
+; GFX9-NEXT:    s_or_b32 s0, s0, s1
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX9-NEXT:    global_store_dword v[0:1], v2, off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; R600-LABEL: fshl_i32:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 3, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 9, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     SUB_INT * T0.W, 0.0, KC0[3].X,
-; R600-NEXT:     BIT_ALIGN_INT T0.X, KC0[2].Z, KC0[2].W, PV.W,
+; R600-NEXT:     NOT_INT * T0.W, KC0[3].X,
+; R600-NEXT:     AND_INT T0.Z, KC0[3].X, literal.x,
+; R600-NEXT:     AND_INT T0.W, PV.W, literal.x,
+; R600-NEXT:     LSHR * T1.W, KC0[2].W, 1,
+; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; R600-NEXT:     LSHR T0.W, PS, PV.W,
+; R600-NEXT:     LSHL * T1.W, KC0[2].Z, PV.Z,
+; R600-NEXT:     OR_INT T0.X, PS, PV.W,
 ; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
 entry:
@@ -132,14 +147,20 @@ define amdgpu_kernel void @fshl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    s_sub_i32 s1, 0, s1
+; SI-NEXT:    s_lshr_b32 s9, s9, 1
+; SI-NEXT:    s_andn2_b32 s10, 31, s1
+; SI-NEXT:    s_and_b32 s1, s1, 31
+; SI-NEXT:    s_lshl_b32 s1, s3, s1
+; SI-NEXT:    s_andn2_b32 s3, 31, s0
+; SI-NEXT:    s_and_b32 s0, s0, 31
+; SI-NEXT:    s_lshr_b32 s8, s8, 1
+; SI-NEXT:    s_lshr_b32 s9, s9, s10
+; SI-NEXT:    s_lshr_b32 s3, s8, s3
+; SI-NEXT:    s_lshl_b32 s0, s2, s0
+; SI-NEXT:    s_or_b32 s1, s1, s9
+; SI-NEXT:    s_or_b32 s0, s0, s3
+; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    s_sub_i32 s0, 0, s0
-; SI-NEXT:    v_alignbit_b32 v1, s3, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    v_mov_b32_e32 v2, s0
-; SI-NEXT:    v_alignbit_b32 v0, s2, v0, v2
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -150,16 +171,22 @@ define amdgpu_kernel void @fshl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; VI-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x34
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x3c
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s7
-; VI-NEXT:    s_sub_i32 s1, 0, s1
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_sub_i32 s0, 0, s0
-; VI-NEXT:    v_alignbit_b32 v1, s5, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s6
-; VI-NEXT:    v_mov_b32_e32 v2, s0
-; VI-NEXT:    v_alignbit_b32 v0, s4, v0, v2
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    s_lshr_b32 s7, s7, 1
+; VI-NEXT:    s_andn2_b32 s8, 31, s1
+; VI-NEXT:    s_and_b32 s1, s1, 31
+; VI-NEXT:    s_lshl_b32 s1, s5, s1
+; VI-NEXT:    s_andn2_b32 s5, 31, s0
+; VI-NEXT:    s_and_b32 s0, s0, 31
+; VI-NEXT:    s_lshr_b32 s6, s6, 1
+; VI-NEXT:    s_lshr_b32 s7, s7, s8
+; VI-NEXT:    s_lshr_b32 s5, s6, s5
+; VI-NEXT:    s_lshl_b32 s0, s4, s0
+; VI-NEXT:    s_or_b32 s1, s1, s7
+; VI-NEXT:    s_or_b32 s0, s0, s5
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
 ;
@@ -170,30 +197,48 @@ define amdgpu_kernel void @fshl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x34
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x3c
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    s_sub_i32 s1, 0, s1
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    s_sub_i32 s0, 0, s0
-; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, v1
-; GFX9-NEXT:    v_mov_b32_e32 v0, s6
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_alignbit_b32 v0, s4, v0, v2
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s3
+; GFX9-NEXT:    s_lshr_b32 s7, s7, 1
+; GFX9-NEXT:    s_andn2_b32 s8, 31, s1
+; GFX9-NEXT:    s_and_b32 s1, s1, 31
+; GFX9-NEXT:    s_lshl_b32 s1, s5, s1
+; GFX9-NEXT:    s_andn2_b32 s5, 31, s0
+; GFX9-NEXT:    s_and_b32 s0, s0, 31
+; GFX9-NEXT:    s_lshr_b32 s6, s6, 1
+; GFX9-NEXT:    s_lshr_b32 s7, s7, s8
+; GFX9-NEXT:    s_lshr_b32 s5, s6, s5
+; GFX9-NEXT:    s_lshl_b32 s0, s4, s0
+; GFX9-NEXT:    s_or_b32 s1, s1, s7
+; GFX9-NEXT:    s_or_b32 s0, s0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; R600-LABEL: fshl_v2i32:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 5, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 17, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     SUB_INT * T0.W, 0.0, KC0[4].X,
-; R600-NEXT:     BIT_ALIGN_INT T0.Y, KC0[3].X, KC0[3].Z, PV.W,
-; R600-NEXT:     SUB_INT * T0.W, 0.0, KC0[3].W,
-; R600-NEXT:     BIT_ALIGN_INT * T0.X, KC0[2].W, KC0[3].Y, PV.W,
+; R600-NEXT:     NOT_INT * T0.W, KC0[4].X,
+; R600-NEXT:     AND_INT T0.Y, KC0[4].X, literal.x,
+; R600-NEXT:     AND_INT T0.Z, PV.W, literal.x,
+; R600-NEXT:     LSHR T0.W, KC0[3].Z, 1,
+; R600-NEXT:     NOT_INT * T1.W, KC0[3].W,
+; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; R600-NEXT:     AND_INT T0.X, KC0[3].W, literal.x,
+; R600-NEXT:     AND_INT T1.Y, PS, literal.x,
+; R600-NEXT:     LSHR T1.Z, KC0[3].Y, 1,
+; R600-NEXT:     LSHR T0.W, PV.W, PV.Z,
+; R600-NEXT:     LSHL * T1.W, KC0[3].X, PV.Y,
+; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; R600-NEXT:     OR_INT T0.Y, PS, PV.W,
+; R600-NEXT:     LSHR T0.W, PV.Z, PV.Y,
+; R600-NEXT:     LSHL * T1.W, KC0[2].W, PV.X,
+; R600-NEXT:     OR_INT T0.X, PS, PV.W,
 ; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
 entry:
@@ -277,22 +322,34 @@ define amdgpu_kernel void @fshl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s15
-; SI-NEXT:    s_sub_i32 s3, 0, s3
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_sub_i32 s2, 0, s2
-; SI-NEXT:    v_alignbit_b32 v3, s11, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, s14
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_sub_i32 s1, 0, s1
-; SI-NEXT:    v_alignbit_b32 v2, s10, v0, v1
-; SI-NEXT:    s_sub_i32 s0, 0, s0
-; SI-NEXT:    v_mov_b32_e32 v0, s13
+; SI-NEXT:    s_lshr_b32 s14, s14, 1
+; SI-NEXT:    s_andn2_b32 s16, 31, s3
+; SI-NEXT:    s_and_b32 s3, s3, 31
+; SI-NEXT:    s_lshl_b32 s3, s11, s3
+; SI-NEXT:    s_andn2_b32 s11, 31, s2
+; SI-NEXT:    s_and_b32 s2, s2, 31
+; SI-NEXT:    s_lshl_b32 s2, s10, s2
+; SI-NEXT:    s_lshr_b32 s11, s14, s11
+; SI-NEXT:    s_or_b32 s2, s2, s11
+; SI-NEXT:    s_andn2_b32 s10, 31, s1
+; SI-NEXT:    s_and_b32 s1, s1, 31
+; SI-NEXT:    s_lshr_b32 s11, s13, 1
+; SI-NEXT:    s_lshl_b32 s1, s9, s1
+; SI-NEXT:    s_lshr_b32 s10, s11, s10
+; SI-NEXT:    s_lshr_b32 s15, s15, 1
+; SI-NEXT:    s_or_b32 s1, s1, s10
+; SI-NEXT:    s_andn2_b32 s9, 31, s0
+; SI-NEXT:    s_and_b32 s0, s0, 31
+; SI-NEXT:    s_lshr_b32 s10, s12, 1
+; SI-NEXT:    s_lshr_b32 s15, s15, s16
+; SI-NEXT:    s_lshr_b32 s9, s10, s9
+; SI-NEXT:    s_lshl_b32 s0, s8, s0
+; SI-NEXT:    s_or_b32 s3, s3, s15
+; SI-NEXT:    s_or_b32 s0, s0, s9
+; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_alignbit_b32 v1, s9, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, s12
-; SI-NEXT:    v_mov_b32_e32 v4, s0
-; SI-NEXT:    v_alignbit_b32 v0, s8, v0, v4
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    v_mov_b32_e32 v3, s3
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -303,24 +360,36 @@ define amdgpu_kernel void @fshl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; VI-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x44
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x54
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s11
-; VI-NEXT:    s_sub_i32 s3, 0, s3
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    s_sub_i32 s2, 0, s2
-; VI-NEXT:    v_alignbit_b32 v3, s7, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_sub_i32 s1, 0, s1
-; VI-NEXT:    v_alignbit_b32 v2, s6, v0, v1
-; VI-NEXT:    s_sub_i32 s0, 0, s0
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_alignbit_b32 v1, s5, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    v_mov_b32_e32 v4, s0
-; VI-NEXT:    v_alignbit_b32 v0, s4, v0, v4
 ; VI-NEXT:    v_mov_b32_e32 v4, s12
 ; VI-NEXT:    v_mov_b32_e32 v5, s13
+; VI-NEXT:    s_lshr_b32 s10, s10, 1
+; VI-NEXT:    s_andn2_b32 s14, 31, s3
+; VI-NEXT:    s_and_b32 s3, s3, 31
+; VI-NEXT:    s_lshl_b32 s3, s7, s3
+; VI-NEXT:    s_andn2_b32 s7, 31, s2
+; VI-NEXT:    s_and_b32 s2, s2, 31
+; VI-NEXT:    s_lshl_b32 s2, s6, s2
+; VI-NEXT:    s_lshr_b32 s7, s10, s7
+; VI-NEXT:    s_or_b32 s2, s2, s7
+; VI-NEXT:    s_andn2_b32 s6, 31, s1
+; VI-NEXT:    s_and_b32 s1, s1, 31
+; VI-NEXT:    s_lshr_b32 s7, s9, 1
+; VI-NEXT:    s_lshl_b32 s1, s5, s1
+; VI-NEXT:    s_lshr_b32 s6, s7, s6
+; VI-NEXT:    s_lshr_b32 s11, s11, 1
+; VI-NEXT:    s_or_b32 s1, s1, s6
+; VI-NEXT:    s_andn2_b32 s5, 31, s0
+; VI-NEXT:    s_and_b32 s0, s0, 31
+; VI-NEXT:    s_lshr_b32 s6, s8, 1
+; VI-NEXT:    s_lshr_b32 s11, s11, s14
+; VI-NEXT:    s_lshr_b32 s5, s6, s5
+; VI-NEXT:    s_lshl_b32 s0, s4, s0
+; VI-NEXT:    s_or_b32 s3, s3, s11
+; VI-NEXT:    s_or_b32 s0, s0, s5
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT:    s_endpgm
 ;
@@ -331,43 +400,81 @@ define amdgpu_kernel void @fshl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; GFX9-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x44
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x54
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s11
-; GFX9-NEXT:    s_sub_i32 s3, 0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    s_sub_i32 s2, 0, s2
-; GFX9-NEXT:    v_alignbit_b32 v3, s7, v0, v1
-; GFX9-NEXT:    v_mov_b32_e32 v0, s10
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    s_sub_i32 s1, 0, s1
-; GFX9-NEXT:    v_alignbit_b32 v2, s6, v0, v1
-; GFX9-NEXT:    s_sub_i32 s0, 0, s0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s9
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, v1
-; GFX9-NEXT:    v_mov_b32_e32 v0, s8
-; GFX9-NEXT:    v_mov_b32_e32 v4, s0
-; GFX9-NEXT:    v_alignbit_b32 v0, s4, v0, v4
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s12
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s13
+; GFX9-NEXT:    s_lshr_b32 s10, s10, 1
+; GFX9-NEXT:    s_andn2_b32 s14, 31, s3
+; GFX9-NEXT:    s_and_b32 s3, s3, 31
+; GFX9-NEXT:    s_lshl_b32 s3, s7, s3
+; GFX9-NEXT:    s_andn2_b32 s7, 31, s2
+; GFX9-NEXT:    s_and_b32 s2, s2, 31
+; GFX9-NEXT:    s_lshl_b32 s2, s6, s2
+; GFX9-NEXT:    s_lshr_b32 s7, s10, s7
+; GFX9-NEXT:    s_or_b32 s2, s2, s7
+; GFX9-NEXT:    s_andn2_b32 s6, 31, s1
+; GFX9-NEXT:    s_and_b32 s1, s1, 31
+; GFX9-NEXT:    s_lshr_b32 s7, s9, 1
+; GFX9-NEXT:    s_lshl_b32 s1, s5, s1
+; GFX9-NEXT:    s_lshr_b32 s6, s7, s6
+; GFX9-NEXT:    s_lshr_b32 s11, s11, 1
+; GFX9-NEXT:    s_or_b32 s1, s1, s6
+; GFX9-NEXT:    s_andn2_b32 s5, 31, s0
+; GFX9-NEXT:    s_and_b32 s0, s0, 31
+; GFX9-NEXT:    s_lshr_b32 s6, s8, 1
+; GFX9-NEXT:    s_lshr_b32 s11, s11, s14
+; GFX9-NEXT:    s_lshr_b32 s5, s6, s5
+; GFX9-NEXT:    s_lshl_b32 s0, s4, s0
+; GFX9-NEXT:    s_or_b32 s3, s3, s11
+; GFX9-NEXT:    s_or_b32 s0, s0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX9-NEXT:    global_store_dwordx4 v[4:5], v[0:3], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; R600-LABEL: fshl_v4i32:
 ; R600:       ; %bb.0: ; %entry
-; R600-NEXT:    ALU 9, @4, KC0[CB0:0-32], KC1[]
-; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
+; R600-NEXT:    ALU 35, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T2.XYZW, T0.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     SUB_INT * T0.W, 0.0, KC0[6].X,
-; R600-NEXT:     BIT_ALIGN_INT * T0.W, KC0[4].X, KC0[5].X, PV.W,
-; R600-NEXT:     SUB_INT * T1.W, 0.0, KC0[5].W,
-; R600-NEXT:     BIT_ALIGN_INT * T0.Z, KC0[3].W, KC0[4].W, PV.W,
-; R600-NEXT:     SUB_INT * T1.W, 0.0, KC0[5].Z,
-; R600-NEXT:     BIT_ALIGN_INT * T0.Y, KC0[3].Z, KC0[4].Z, PV.W,
-; R600-NEXT:     SUB_INT * T1.W, 0.0, KC0[5].Y,
-; R600-NEXT:     BIT_ALIGN_INT * T0.X, KC0[3].Y, KC0[4].Y, PV.W,
-; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; R600-NEXT:     AND_INT * T0.W, KC0[5].Y, literal.x,
+; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; R600-NEXT:     LSHR T0.Z, KC0[4].Z, 1,
+; R600-NEXT:     NOT_INT T1.W, KC0[6].X,
+; R600-NEXT:     AND_INT * T2.W, KC0[6].X, literal.x,
+; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; R600-NEXT:     AND_INT T0.X, KC0[5].Z, literal.x,
+; R600-NEXT:     LSHL T0.Y, KC0[4].X, PS,
+; R600-NEXT:     NOT_INT T1.Z, KC0[5].W,
+; R600-NEXT:     AND_INT * T1.W, PV.W, literal.x,
+; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; R600-NEXT:     LSHR * T2.W, KC0[5].X, 1,
+; R600-NEXT:     LSHR T1.X, PV.W, T1.W,
+; R600-NEXT:     AND_INT T1.Y, KC0[5].W, literal.x,
+; R600-NEXT:     AND_INT T1.Z, T1.Z, literal.x,
+; R600-NEXT:     LSHR T1.W, KC0[4].W, 1,
+; R600-NEXT:     NOT_INT * T2.W, KC0[5].Z,
+; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; R600-NEXT:     AND_INT T2.X, PS, literal.x,
+; R600-NEXT:     LSHR T2.Y, PV.W, PV.Z,
+; R600-NEXT:     LSHL T1.Z, KC0[3].W, PV.Y,
+; R600-NEXT:     NOT_INT T1.W, KC0[5].Y,
+; R600-NEXT:     OR_INT * T2.W, T0.Y, PV.X,
+; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; R600-NEXT:     AND_INT T1.X, PV.W, literal.x,
+; R600-NEXT:     LSHR T0.Y, KC0[4].Y, 1,
+; R600-NEXT:     OR_INT T2.Z, PV.Z, PV.Y,
+; R600-NEXT:     LSHR T1.W, T0.Z, PV.X,
+; R600-NEXT:     LSHL * T3.W, KC0[3].Z, T0.X,
+; R600-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; R600-NEXT:     OR_INT T2.Y, PS, PV.W,
+; R600-NEXT:     LSHR T1.W, PV.Y, PV.X,
+; R600-NEXT:     LSHL * T0.W, KC0[3].Y, T0.W,
+; R600-NEXT:     OR_INT T2.X, PS, PV.W,
+; R600-NEXT:     LSHR * T0.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
 entry:
   %0 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z)


        

