[llvm] 0efbb70 - [AMDGPU] Expand ROTL i16 to shifts.

via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 17 07:34:52 PDT 2020


Author: alex-t
Date: 2020-09-17T17:34:33+03:00
New Revision: 0efbb70b719e990fe153373eda5a604344ae36bb

URL: https://github.com/llvm/llvm-project/commit/0efbb70b719e990fe153373eda5a604344ae36bb
DIFF: https://github.com/llvm/llvm-project/commit/0efbb70b719e990fe153373eda5a604344ae36bb.diff

LOG: [AMDGPU] Expand ROTL i16 to shifts.

The instruction combining pass turns a library rotl implementation into a call to llvm.fshl.i16. During SelectionDAG building the intrinsic becomes an ISD::ROTL node, which cannot be selected on AMDGPU, so it must be expanded back into shifts. The same applies to ISD::ROTR.
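
For context, the kind of library rotate InstCombine canonicalizes looks roughly like the sketch below (hypothetical C++, not taken from this commit; rotl16 is an illustrative name, and whether InstCombine matches this exact spelling depends on the pattern). A funnel shift with both data operands equal, llvm.fshl.i16(v, v, s), is exactly a 16-bit rotate left:

    #include <cstdint>

    // Rotate-left of a 16-bit value. InstCombine recognizes this
    // shift/or idiom and folds it into @llvm.fshl.i16(v, v, s); a
    // funnel shift with both inputs equal is exactly a rotate.
    static inline uint16_t rotl16(uint16_t v, unsigned s) {
      s &= 15;                                    // rotate amount mod 16
      return uint16_t((v << s) | (v >> ((16 - s) & 15)));
    }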

Reviewed By: rampitec, arsenm

Differential Revision: https://reviews.llvm.org/D87618

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/rotl.ll
    llvm/test/CodeGen/AMDGPU/rotr.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index e119d65a7f0a..ed0a3a17e71a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -546,8 +546,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
 
-    setOperationAction(ISD::ROTR, MVT::i16, Promote);
-    setOperationAction(ISD::ROTL, MVT::i16, Promote);
+    setOperationAction(ISD::ROTR, MVT::i16, Expand);
+    setOperationAction(ISD::ROTL, MVT::i16, Expand);
 
     setOperationAction(ISD::SDIV, MVT::i16, Promote);
     setOperationAction(ISD::UDIV, MVT::i16, Promote);

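With the action switched from Promote to Expand, the generic DAG legalizer rewrites the ROTL/ROTR node into the negate/mask/shift/or sequence that the updated tests below check for. A C++ sketch of the arithmetic the expansion performs (inferred from the instruction sequence in the tests, not copied from the legalizer source):

    #include <cstdint>

    // Shift expansion of rotl i16, mirroring the checked ISA:
    //   v_sub_nc_u16  ns, 0, s      ; negate the shift amount
    //   v_and_b32     15, s / ns    ; mask both amounts to 0..15
    //   v_lshlrev_b16 / v_lshrrev_b16 / v_or_b32
    uint16_t expanded_rotl16(uint16_t x, uint16_t s) {
      uint16_t ns = uint16_t(0 - s);              // negated amount
      return uint16_t((x << (s & 15)) | (x >> (ns & 15)));
    }
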
diff --git a/llvm/test/CodeGen/AMDGPU/rotl.ll b/llvm/test/CodeGen/AMDGPU/rotl.ll
index c4bc8cdaabf5..12c46d360528 100644
--- a/llvm/test/CodeGen/AMDGPU/rotl.ll
+++ b/llvm/test/CodeGen/AMDGPU/rotl.ll
@@ -55,3 +55,28 @@ entry:
   store <4 x i32> %3, <4 x i32> addrspace(1)* %in
   ret void
 }
+
+; GCN-LABEL: @test_rotl_i16
+; GCN: global_load_ushort [[X:v[0-9]+]]
+; GCN: global_load_ushort [[D:v[0-9]+]]
+; GCN: v_sub_nc_u16_e64 [[NX:v[0-9]+]], 0, [[X]]
+; GCN: v_and_b32_e32 [[XAND:v[0-9]+]], 15, [[X]]
+; GCN: v_and_b32_e32 [[NXAND:v[0-9]+]], 15, [[NX]]
+; GCN: v_lshlrev_b16_e64 [[LO:v[0-9]+]], [[XAND]], [[D]]
+; GCN: v_lshrrev_b16_e64 [[HI:v[0-9]+]], [[NXAND]], [[D]]
+; GCN: v_or_b32_e32 [[RES:v[0-9]+]], [[LO]], [[HI]]
+; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+
+declare i16 @llvm.fshl.i16(i16, i16, i16)
+
+define void @test_rotl_i16(i16 addrspace(1)* nocapture readonly %sourceA, i16 addrspace(1)* nocapture readonly %sourceB, i16 addrspace(1)* nocapture %destValues) {
+entry:
+  %arrayidx = getelementptr inbounds i16, i16 addrspace(1)* %sourceA, i64 16
+  %a = load i16, i16 addrspace(1)* %arrayidx
+  %arrayidx2 = getelementptr inbounds i16, i16 addrspace(1)* %sourceB, i64 24
+  %b = load i16, i16 addrspace(1)* %arrayidx2
+  %c = tail call i16 @llvm.fshl.i16(i16 %a, i16 %a, i16 %b)
+  %arrayidx5 = getelementptr inbounds i16, i16 addrspace(1)* %destValues, i64 4
+  store i16 %c, i16 addrspace(1)* %arrayidx5
+  ret void
+}

diff --git a/llvm/test/CodeGen/AMDGPU/rotr.ll b/llvm/test/CodeGen/AMDGPU/rotr.ll
index b4e2c2b67ce1..84f277bcc087 100644
--- a/llvm/test/CodeGen/AMDGPU/rotr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rotr.ll
@@ -51,3 +51,28 @@ entry:
   store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %in
   ret void
 }
+
+; GCN-LABEL: @test_rotr_i16
+; GCN: global_load_ushort [[X:v[0-9]+]]
+; GCN: global_load_ushort [[D:v[0-9]+]]
+; GCN: v_sub_nc_u16_e64 [[NX:v[0-9]+]], 0, [[X]]
+; GCN: v_and_b32_e32 [[XAND:v[0-9]+]], 15, [[X]]
+; GCN: v_and_b32_e32 [[NXAND:v[0-9]+]], 15, [[NX]]
+; GCN: v_lshrrev_b16_e64 [[LO:v[0-9]+]], [[XAND]], [[D]]
+; GCN: v_lshlrev_b16_e64 [[HI:v[0-9]+]], [[NXAND]], [[D]]
+; GCN: v_or_b32_e32 [[RES:v[0-9]+]], [[LO]], [[HI]]
+; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+
+declare i16 @llvm.fshr.i16(i16, i16, i16)
+
+define void @test_rotr_i16(i16 addrspace(1)* nocapture readonly %sourceA, i16 addrspace(1)* nocapture readonly %sourceB, i16 addrspace(1)* nocapture %destValues) {
+entry:
+  %arrayidx = getelementptr inbounds i16, i16 addrspace(1)* %sourceA, i64 16
+  %a = load i16, i16 addrspace(1)* %arrayidx
+  %arrayidx2 = getelementptr inbounds i16, i16 addrspace(1)* %sourceB, i64 24
+  %b = load i16, i16 addrspace(1)* %arrayidx2
+  %c = tail call i16 @llvm.fshr.i16(i16 %a, i16 %a, i16 %b)
+  %arrayidx5 = getelementptr inbounds i16, i16 addrspace(1)* %destValues, i64 4
+  store i16 %c, i16 addrspace(1)* %arrayidx5
+  ret void
+}
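
The rotr case is the mirror image of the rotl expansion above: llvm.fshr.i16(a, a, s) is a 16-bit rotate right, and the expansion computes (x >> (s & 15)) | (x << (-s & 15)), so the checked v_lshrrev/v_lshlrev roles are simply swapped relative to the rotl test.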
