[llvm] r290351 - AMDGPU: Use i16 for i16 shift amount
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 22 08:36:26 PST 2016
Author: arsenm
Date: Thu Dec 22 10:36:25 2016
New Revision: 290351
URL: http://llvm.org/viewvc/llvm-project?rev=290351&view=rev
Log:
AMDGPU: Use i16 for i16 shift amount
Modified:
llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td
llvm/trunk/test/CodeGen/AMDGPU/shl.ll
Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=290351&r1=290350&r2=290351&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Thu Dec 22 10:36:25 2016
@@ -1811,8 +1811,10 @@ EVT SITargetLowering::getSetCCResultType
return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}
-MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT) const {
- return MVT::i32;
+MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
+ // TODO: Should i16 be used always if legal? For now it would force VALU
+ // shifts.
+ return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
}
// Answering this is somewhat tricky and depends on the specific device which
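
Note on the hunk above: getScalarShiftAmountTy is the TargetLowering hook that tells SelectionDAG which integer type to use for shift-amount operands. Returning i32 unconditionally meant an i16 shift reached instruction selection as (shl i16, i32), so the VI 16-bit shift patterns had to match mixed operand types. Below is a minimal sketch of the two conventions in plain C++ (not LLVM code; the file name and the 4-bit amount mask are illustrative assumptions about the 16-bit shift hardware):

  // shift_amount_demo.cpp: illustrative sketch only.
  #include <cassert>
  #include <cstdint>

  // Old form: 16-bit value, 32-bit amount (mixed types in the DAG).
  uint16_t shl_i16_amt32(uint16_t a, uint32_t b) {
    return uint16_t(a << (b & 15)); // assumed 4-bit hardware mask
  }

  // New form: both operands are i16, matching v_lshlrev_b16 directly.
  uint16_t shl_i16_amt16(uint16_t a, uint16_t b) {
    return uint16_t(a << (b & 15));
  }

  int main() {
    for (unsigned b = 0; b < 16; ++b)
      assert(shl_i16_amt32(0x00ff, b) == shl_i16_amt16(0x00ff, uint16_t(b)));
    return 0;
  }

Both compute the same value; the payoff is purely in instruction selection, where the operand types now line up with the VI 16-bit shift instructions.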
Modified: llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td?rev=290351&r1=290350&r2=290351&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td Thu Dec 22 10:36:25 2016
@@ -416,18 +416,18 @@ def : Pat<
multiclass Bits_OpsRev_i16_Pats <SDPatternOperator op, Instruction inst> {
def : Pat<
- (op i16:$src0, i32:$src1),
+ (op i16:$src0, i16:$src1),
(inst $src1, $src0)
>;
def : Pat<
- (i32 (zext (op i16:$src0, i32:$src1))),
+ (i32 (zext (op i16:$src0, i16:$src1))),
(inst $src1, $src0)
>;
def : Pat<
- (i64 (zext (op i16:$src0, i32:$src1))),
+ (i64 (zext (op i16:$src0, i16:$src1))),
(REG_SEQUENCE VReg_64,
(inst $src1, $src0), sub0,
(V_MOV_B32_e32 (i32 0)), sub1)
@@ -464,9 +464,9 @@ def : Pat <
(V_XOR_B32_e64 $src0, $src1)
>;
-defm : Bits_OpsRev_i16_Pats<shl, V_LSHLREV_B16_e32>;
-defm : Bits_OpsRev_i16_Pats<srl, V_LSHRREV_B16_e32>;
-defm : Bits_OpsRev_i16_Pats<sra, V_ASHRREV_I16_e32>;
+defm : Bits_OpsRev_i16_Pats<shl, V_LSHLREV_B16_e64>;
+defm : Bits_OpsRev_i16_Pats<srl, V_LSHRREV_B16_e64>;
+defm : Bits_OpsRev_i16_Pats<sra, V_ASHRREV_I16_e64>;
def : ZExt_i16_i1_Pat<zext>;
def : ZExt_i16_i1_Pat<anyext>;
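
Note on the patterns above: the _rev VOP2 forms take the shift amount in src0 and the value in src1, hence the (inst $src1, $src0) operand swap. The zext patterns rely on the b16 instructions leaving zeros in bits 31:16 of the 32-bit destination, so a 16-bit shift can feed a 32-bit use with no extra extend; for i64 the pattern just pins the high half to zero with a REG_SEQUENCE. A rough C++ model of what the i64 pattern produces (illustrative only; the 4-bit amount mask is an assumption):

  // zext_shift_demo.cpp: models the (i64 (zext (shl i16, i16))) pattern.
  #include <cassert>
  #include <cstdint>

  uint64_t shl_i16_zext64(uint16_t a, uint16_t b) {
    // sub0: the v_lshlrev_b16 result; bits 31:16 of the 32-bit lane end
    // up zero, which is what makes the zext patterns legal.
    uint32_t lo = uint16_t(a << (b & 15)); // assumed 4-bit hardware mask
    // sub1: V_MOV_B32 0; REG_SEQUENCE glues the halves into 64 bits.
    uint32_t hi = 0;
    return (uint64_t(hi) << 32) | lo;
  }

  int main() {
    assert(shl_i16_zext64(0x8001, 1) == 2);
    return 0;
  }

As for switching the defm lines from _e32 to _e64: with i16 shift amounts an SGPR can now appear in operand positions the VOP2 (e32) encoding cannot accept, so selecting the VOP3 (e64) form keeps such operands legal; presumably the instruction can still be shrunk back to e32 later when the operands allow it.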
Modified: llvm/trunk/test/CodeGen/AMDGPU/shl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/shl.ll?rev=290351&r1=290350&r2=290351&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/shl.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/shl.ll Thu Dec 22 10:36:25 2016
@@ -19,8 +19,8 @@ declare i32 @llvm.r600.read.tidig.x() #0
define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
- %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
- %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
+ %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
+ %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
%result = shl <2 x i32> %a, %b
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
@@ -46,52 +46,102 @@ define void @shl_v2i32(<2 x i32> addrspa
define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
- %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
- %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
+ %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
+ %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
%result = shl <4 x i32> %a, %b
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
-;VI: {{^}}shl_i16:
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
+; GCN-LABEL: {{^}}shl_i16:
+; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
%b_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
- %a = load i16, i16 addrspace(1) * %in
- %b = load i16, i16 addrspace(1) * %b_ptr
+ %a = load i16, i16 addrspace(1)* %in
+ %b = load i16, i16 addrspace(1)* %b_ptr
%result = shl i16 %a, %b
store i16 %result, i16 addrspace(1)* %out
ret void
}
+; GCN-LABEL: {{^}}shl_i16_v_s:
+; SI: v_lshlrev_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-;VI: {{^}}shl_v2i16:
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
+; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+define void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
+ %a = load i16, i16 addrspace(1)* %in
+ %result = shl i16 %a, %b
+ store i16 %result, i16 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}shl_i16_v_compute_s:
+; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+
+; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+define void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
+ %a = load i16, i16 addrspace(1)* %in
+ %b.add = add i16 %b, 3
+ %result = shl i16 %a, %b.add
+ store i16 %result, i16 addrspace(1)* %out
+ ret void
+}
+; GCN-LABEL: {{^}}shl_i16_computed_amount:
+; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 3, v{{[0-9]+}}
+; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, [[ADD]], v{{[0-9]+}}
+define void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
+ %b_ptr = getelementptr i16, i16 addrspace(1)* %gep, i16 1
+ %a = load volatile i16, i16 addrspace(1)* %in
+ %b = load volatile i16, i16 addrspace(1)* %b_ptr
+ %b.add = add i16 %b, 3
+ %result = shl i16 %a, %b.add
+ store i16 %result, i16 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}shl_i16_i_s:
+; GCN: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 12
+define void @shl_i16_i_s(i16 addrspace(1)* %out, i16 zeroext %a) {
+ %result = shl i16 %a, 12
+ store i16 %result, i16 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}shl_v2i16:
+; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i16 1
- %a = load <2 x i16>, <2 x i16> addrspace(1) * %in
- %b = load <2 x i16>, <2 x i16> addrspace(1) * %b_ptr
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %gep, i16 1
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %in
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
%result = shl <2 x i16> %a, %b
store <2 x i16> %result, <2 x i16> addrspace(1)* %out
ret void
}
-
-;VI: {{^}}shl_v4i16:
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-
+; GCN-LABEL: {{^}}shl_v4i16:
+; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i16 1
- %a = load <4 x i16>, <4 x i16> addrspace(1) * %in
- %b = load <4 x i16>, <4 x i16> addrspace(1) * %b_ptr
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i32 %tid
+ %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %gep, i16 1
+ %a = load <4 x i16>, <4 x i16> addrspace(1)* %gep
+ %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
%result = shl <4 x i16> %a, %b
- store <4 x i16> %result, <4 x i16> addrspace(1)* %out
+ store <4 x i16> %result, <4 x i16> addrspace(1)* %gep.out
ret void
}
@@ -107,16 +157,13 @@ define void @shl_v4i16(<4 x i16> addrspa
;EG-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
;EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
-;SI: {{^}}shl_i64:
-;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-
-;VI: {{^}}shl_i64:
-;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
-
+; GCN-LABEL: {{^}}shl_i64:
+; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+; VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
- %a = load i64, i64 addrspace(1) * %in
- %b = load i64, i64 addrspace(1) * %b_ptr
+ %a = load i64, i64 addrspace(1)* %in
+ %b = load i64, i64 addrspace(1)* %b_ptr
%result = shl i64 %a, %b
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -154,8 +201,8 @@ define void @shl_i64(i64 addrspace(1)* %
define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
- %a = load <2 x i64>, <2 x i64> addrspace(1) * %in
- %b = load <2 x i64>, <2 x i64> addrspace(1) * %b_ptr
+ %a = load <2 x i64>, <2 x i64> addrspace(1)* %in
+ %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
%result = shl <2 x i64> %a, %b
store <2 x i64> %result, <2 x i64> addrspace(1)* %out
ret void
@@ -217,8 +264,8 @@ define void @shl_v2i64(<2 x i64> addrspa
define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
- %a = load <4 x i64>, <4 x i64> addrspace(1) * %in
- %b = load <4 x i64>, <4 x i64> addrspace(1) * %b_ptr
+ %a = load <4 x i64>, <4 x i64> addrspace(1)* %in
+ %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
%result = shl <4 x i64> %a, %b
store <4 x i64> %result, <4 x i64> addrspace(1)* %out
ret void
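
Note on the new tests: they pin down where an i16 shift executes. A fully uniform shift with a constant amount still folds to a scalar 32-bit shift (shl_i16_i_s checks s_lshl_b32), while divergent operands force the VALU v_lshlrev_b16 forms, which is the cost the TODO in SIISelLowering.cpp alludes to. Doing an i16 left shift at 32 bits is sound because the low 16 bits of the result never depend on the high bits of the input; a quick self-contained C++ check of that claim (illustrative only):

  // scalar_shift_demo.cpp: why a 32-bit scalar shift can implement shl i16.
  #include <cassert>
  #include <cstdint>

  int main() {
    for (uint32_t a = 0; a <= 0xffff; ++a) {
      uint32_t wide = a | 0xffff0000u; // garbage in the high half
      for (unsigned b = 0; b < 16; ++b)
        // Truncating the 32-bit result to 16 bits matches a native
        // 16-bit shift for every amount 0..15.
        assert(uint16_t(wide << b) == uint16_t(uint16_t(a) << b));
    }
    return 0;
  }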