[llvm] r227330 - [Hexagon] Replacing XTYPE/SHIFT intrinsic patterns. Adding missing instructions and tests.
Colin LeMahieu
colinl at codeaurora.org
Wed Jan 28 09:37:59 PST 2015
Author: colinl
Date: Wed Jan 28 11:37:59 2015
New Revision: 227330
URL: http://llvm.org/viewvc/llvm-project?rev=227330&view=rev
Log:
[Hexagon] Replacing XTYPE/SHIFT intrinsic patterns. Adding missing instructions and tests.
Added:
llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
Modified:
llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.td
llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV5.td
llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td
llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td
llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV5.td
llvm/trunk/test/MC/Disassembler/Hexagon/xtype_shift.txt
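For orientation: each new CodeGen test below calls a Hexagon intrinsic directly from LLVM IR and FileCheck-verifies the emitted assembly. A minimal sketch in that style (hypothetical function name; with -O0 the argument and result land in r0, as in the tests below):

    ; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
    declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
    define i32 @example_asl(i32 %a) {
      %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 3)
      ret i32 %z
    }
    ; CHECK: r0 = asl(r0, #3)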
Modified: llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.td?rev=227330&r1=227329&r2=227330&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.td Wed Jan 28 11:37:59 2015
@@ -3358,6 +3358,11 @@ def S2_asl_i_r_sat : T_S2op_2_ii <"asl",
let isCodeGenOnly = 0 in
def S2_asr_i_r_rnd : T_S2op_2_ii <"asr", 0b010, 0b000, 0, 1>;
+def S2_asr_i_r_rnd_goodsyntax
+ : SInst <(outs IntRegs:$dst), (ins IntRegs:$src, u5Imm:$u5),
+ "$dst = asrrnd($src, #$u5)",
+ [], "", S_2op_tc_1_SLOT23>;
+
def: Pat<(i32 (sra (i32 (add (i32 (sra I32:$src1, u5ImmPred:$src2)),
(i32 1))),
(i32 1))),
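(The Pat above is truncated by the diff context; it recognizes the round-then-shift idiom so it can be selected as a single rounding shift. In IR terms the matched shape is roughly the following sketch; which instruction and immediate get selected depends on the remainder of the pattern, outside this hunk:

    define i32 @round_shift(i32 %x) {
      ; ((x >> 5) + 1) >> 1 -- the (sra (add (sra ...), 1), 1) shape matched above
      %s = ashr i32 %x, 5
      %a = add i32 %s, 1
      %r = ashr i32 %a, 1
      ret i32 %r
    }
)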
Modified: llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV5.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV5.td?rev=227330&r1=227329&r2=227330&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV5.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV5.td Wed Jan 28 11:37:59 2015
@@ -25,6 +25,10 @@ def S2_asr_i_p_rnd : S_2OpInstImm<"asr",
let Inst{13-8} = src2;
}
+def S2_asr_i_p_rnd_goodsyntax
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
+ "$dst = asrrnd($src1, #$src2)">;
+
let isCodeGenOnly = 0 in
def C4_fastcorner9 : T_LOGICAL_2OP<"fastcorner9", 0b000, 0, 0>,
Requires<[HasV5T]> {
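(S2_asr_i_p_rnd_goodsyntax is the 64-bit, register-pair counterpart of the asrrnd form added to HexagonInstrInfo.td above. The underlying :rnd instruction is exercised in the new CodeGen test; a sketch with a nonzero immediate, where the tests themselves use #0:

    declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32)
    define i64 @asr_p_rnd_example(i64 %a) {
      %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 4)
      ret i64 %z
    }
    ; CHECK: r1:0 = asr(r1:0, #4):rnd
)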
Modified: llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td?rev=227330&r1=227329&r2=227330&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td Wed Jan 28 11:37:59 2015
@@ -33,6 +33,10 @@ class T_IR_pat <InstHexagon MI, Intrinsi
: Pat<(IntID ImmPred:$Is, I32:$Rt),
(MI ImmPred:$Is, I32:$Rt)>;
+class T_PI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID I64:$Rs, imm:$It),
+ (MI DoubleRegs:$Rs, imm:$It)>;
+
class T_RR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I32:$Rs, I32:$Rt),
(MI I32:$Rs, I32:$Rt)>;
@@ -57,14 +61,30 @@ class T_RRI_pat <InstHexagon MI, Intrins
: Pat <(IntID I32:$Rs, I32:$Rt, imm:$Iu),
(MI I32:$Rs, I32:$Rt, imm:$Iu)>;
+class T_IRI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID imm:$It, I32:$Rs, imm:$Iu),
+ (MI imm:$It, I32:$Rs, imm:$Iu)>;
+
class T_RRR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I32:$Rs, I32:$Rt, I32:$Ru),
(MI I32:$Rs, I32:$Rt, I32:$Ru)>;
+class T_PPI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I64:$Rt, imm:$Iu),
+ (MI DoubleRegs:$Rs, DoubleRegs:$Rt, imm:$Iu)>;
+
+class T_PPR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I64:$Rt, I32:$Ru),
+ (MI DoubleRegs:$Rs, DoubleRegs:$Rt, I32:$Ru)>;
+
class T_PRR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs, I32:$Rt, I32:$Ru),
(MI DoubleRegs:$Rs, I32:$Rt, I32:$Ru)>;
+class T_PR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I32:$Rt),
+ (MI DoubleRegs:$Rs, I32:$Rt)>;
+
//===----------------------------------------------------------------------===//
// MPYS / Multiply signed/unsigned halfwords
//Rd=mpy[u](Rs.[H|L],Rt.[H|L])[:<<1][:rnd][:sat]
@@ -310,6 +330,143 @@ def : T_RR_pat<A2_min, int_hexagon_A2_m
def : T_RR_pat<A2_maxu, int_hexagon_A2_maxu>;
def : T_RR_pat<A2_minu, int_hexagon_A2_minu>;
+// Shift and accumulate
+def : T_RRI_pat <S2_asr_i_r_nac, int_hexagon_S2_asr_i_r_nac>;
+def : T_RRI_pat <S2_lsr_i_r_nac, int_hexagon_S2_lsr_i_r_nac>;
+def : T_RRI_pat <S2_asl_i_r_nac, int_hexagon_S2_asl_i_r_nac>;
+def : T_RRI_pat <S2_asr_i_r_acc, int_hexagon_S2_asr_i_r_acc>;
+def : T_RRI_pat <S2_lsr_i_r_acc, int_hexagon_S2_lsr_i_r_acc>;
+def : T_RRI_pat <S2_asl_i_r_acc, int_hexagon_S2_asl_i_r_acc>;
+
+def : T_RRI_pat <S2_asr_i_r_and, int_hexagon_S2_asr_i_r_and>;
+def : T_RRI_pat <S2_lsr_i_r_and, int_hexagon_S2_lsr_i_r_and>;
+def : T_RRI_pat <S2_asl_i_r_and, int_hexagon_S2_asl_i_r_and>;
+def : T_RRI_pat <S2_asr_i_r_or, int_hexagon_S2_asr_i_r_or>;
+def : T_RRI_pat <S2_lsr_i_r_or, int_hexagon_S2_lsr_i_r_or>;
+def : T_RRI_pat <S2_asl_i_r_or, int_hexagon_S2_asl_i_r_or>;
+def : T_RRI_pat <S2_lsr_i_r_xacc, int_hexagon_S2_lsr_i_r_xacc>;
+def : T_RRI_pat <S2_asl_i_r_xacc, int_hexagon_S2_asl_i_r_xacc>;
+
+def : T_PPI_pat <S2_asr_i_p_nac, int_hexagon_S2_asr_i_p_nac>;
+def : T_PPI_pat <S2_lsr_i_p_nac, int_hexagon_S2_lsr_i_p_nac>;
+def : T_PPI_pat <S2_asl_i_p_nac, int_hexagon_S2_asl_i_p_nac>;
+def : T_PPI_pat <S2_asr_i_p_acc, int_hexagon_S2_asr_i_p_acc>;
+def : T_PPI_pat <S2_lsr_i_p_acc, int_hexagon_S2_lsr_i_p_acc>;
+def : T_PPI_pat <S2_asl_i_p_acc, int_hexagon_S2_asl_i_p_acc>;
+
+def : T_PPI_pat <S2_asr_i_p_and, int_hexagon_S2_asr_i_p_and>;
+def : T_PPI_pat <S2_lsr_i_p_and, int_hexagon_S2_lsr_i_p_and>;
+def : T_PPI_pat <S2_asl_i_p_and, int_hexagon_S2_asl_i_p_and>;
+def : T_PPI_pat <S2_asr_i_p_or, int_hexagon_S2_asr_i_p_or>;
+def : T_PPI_pat <S2_lsr_i_p_or, int_hexagon_S2_lsr_i_p_or>;
+def : T_PPI_pat <S2_asl_i_p_or, int_hexagon_S2_asl_i_p_or>;
+def : T_PPI_pat <S2_lsr_i_p_xacc, int_hexagon_S2_lsr_i_p_xacc>;
+def : T_PPI_pat <S2_asl_i_p_xacc, int_hexagon_S2_asl_i_p_xacc>;
+
+def : T_RRR_pat <S2_asr_r_r_nac, int_hexagon_S2_asr_r_r_nac>;
+def : T_RRR_pat <S2_lsr_r_r_nac, int_hexagon_S2_lsr_r_r_nac>;
+def : T_RRR_pat <S2_asl_r_r_nac, int_hexagon_S2_asl_r_r_nac>;
+def : T_RRR_pat <S2_lsl_r_r_nac, int_hexagon_S2_lsl_r_r_nac>;
+def : T_RRR_pat <S2_asr_r_r_acc, int_hexagon_S2_asr_r_r_acc>;
+def : T_RRR_pat <S2_lsr_r_r_acc, int_hexagon_S2_lsr_r_r_acc>;
+def : T_RRR_pat <S2_asl_r_r_acc, int_hexagon_S2_asl_r_r_acc>;
+def : T_RRR_pat <S2_lsl_r_r_acc, int_hexagon_S2_lsl_r_r_acc>;
+
+def : T_RRR_pat <S2_asr_r_r_and, int_hexagon_S2_asr_r_r_and>;
+def : T_RRR_pat <S2_lsr_r_r_and, int_hexagon_S2_lsr_r_r_and>;
+def : T_RRR_pat <S2_asl_r_r_and, int_hexagon_S2_asl_r_r_and>;
+def : T_RRR_pat <S2_lsl_r_r_and, int_hexagon_S2_lsl_r_r_and>;
+def : T_RRR_pat <S2_asr_r_r_or, int_hexagon_S2_asr_r_r_or>;
+def : T_RRR_pat <S2_lsr_r_r_or, int_hexagon_S2_lsr_r_r_or>;
+def : T_RRR_pat <S2_asl_r_r_or, int_hexagon_S2_asl_r_r_or>;
+def : T_RRR_pat <S2_lsl_r_r_or, int_hexagon_S2_lsl_r_r_or>;
+
+def : T_PPR_pat <S2_asr_r_p_nac, int_hexagon_S2_asr_r_p_nac>;
+def : T_PPR_pat <S2_lsr_r_p_nac, int_hexagon_S2_lsr_r_p_nac>;
+def : T_PPR_pat <S2_asl_r_p_nac, int_hexagon_S2_asl_r_p_nac>;
+def : T_PPR_pat <S2_lsl_r_p_nac, int_hexagon_S2_lsl_r_p_nac>;
+def : T_PPR_pat <S2_asr_r_p_acc, int_hexagon_S2_asr_r_p_acc>;
+def : T_PPR_pat <S2_lsr_r_p_acc, int_hexagon_S2_lsr_r_p_acc>;
+def : T_PPR_pat <S2_asl_r_p_acc, int_hexagon_S2_asl_r_p_acc>;
+def : T_PPR_pat <S2_lsl_r_p_acc, int_hexagon_S2_lsl_r_p_acc>;
+
+def : T_PPR_pat <S2_asr_r_p_and, int_hexagon_S2_asr_r_p_and>;
+def : T_PPR_pat <S2_lsr_r_p_and, int_hexagon_S2_lsr_r_p_and>;
+def : T_PPR_pat <S2_asl_r_p_and, int_hexagon_S2_asl_r_p_and>;
+def : T_PPR_pat <S2_lsl_r_p_and, int_hexagon_S2_lsl_r_p_and>;
+def : T_PPR_pat <S2_asr_r_p_or, int_hexagon_S2_asr_r_p_or>;
+def : T_PPR_pat <S2_lsr_r_p_or, int_hexagon_S2_lsr_r_p_or>;
+def : T_PPR_pat <S2_asl_r_p_or, int_hexagon_S2_asl_r_p_or>;
+def : T_PPR_pat <S2_lsl_r_p_or, int_hexagon_S2_lsl_r_p_or>;
+
+def : T_RRI_pat <S2_asr_i_r_nac, int_hexagon_S2_asr_i_r_nac>;
+def : T_RRI_pat <S2_lsr_i_r_nac, int_hexagon_S2_lsr_i_r_nac>;
+def : T_RRI_pat <S2_asl_i_r_nac, int_hexagon_S2_asl_i_r_nac>;
+def : T_RRI_pat <S2_asr_i_r_acc, int_hexagon_S2_asr_i_r_acc>;
+def : T_RRI_pat <S2_lsr_i_r_acc, int_hexagon_S2_lsr_i_r_acc>;
+def : T_RRI_pat <S2_asl_i_r_acc, int_hexagon_S2_asl_i_r_acc>;
+
+def : T_RRI_pat <S2_asr_i_r_and, int_hexagon_S2_asr_i_r_and>;
+def : T_RRI_pat <S2_lsr_i_r_and, int_hexagon_S2_lsr_i_r_and>;
+def : T_RRI_pat <S2_asl_i_r_and, int_hexagon_S2_asl_i_r_and>;
+def : T_RRI_pat <S2_asr_i_r_or, int_hexagon_S2_asr_i_r_or>;
+def : T_RRI_pat <S2_lsr_i_r_or, int_hexagon_S2_lsr_i_r_or>;
+def : T_RRI_pat <S2_asl_i_r_or, int_hexagon_S2_asl_i_r_or>;
+def : T_RRI_pat <S2_lsr_i_r_xacc, int_hexagon_S2_lsr_i_r_xacc>;
+def : T_RRI_pat <S2_asl_i_r_xacc, int_hexagon_S2_asl_i_r_xacc>;
+
+def : T_PPI_pat <S2_asr_i_p_nac, int_hexagon_S2_asr_i_p_nac>;
+def : T_PPI_pat <S2_lsr_i_p_nac, int_hexagon_S2_lsr_i_p_nac>;
+def : T_PPI_pat <S2_asl_i_p_nac, int_hexagon_S2_asl_i_p_nac>;
+def : T_PPI_pat <S2_asr_i_p_acc, int_hexagon_S2_asr_i_p_acc>;
+def : T_PPI_pat <S2_lsr_i_p_acc, int_hexagon_S2_lsr_i_p_acc>;
+def : T_PPI_pat <S2_asl_i_p_acc, int_hexagon_S2_asl_i_p_acc>;
+
+def : T_PPI_pat <S2_asr_i_p_and, int_hexagon_S2_asr_i_p_and>;
+def : T_PPI_pat <S2_lsr_i_p_and, int_hexagon_S2_lsr_i_p_and>;
+def : T_PPI_pat <S2_asl_i_p_and, int_hexagon_S2_asl_i_p_and>;
+def : T_PPI_pat <S2_asr_i_p_or, int_hexagon_S2_asr_i_p_or>;
+def : T_PPI_pat <S2_lsr_i_p_or, int_hexagon_S2_lsr_i_p_or>;
+def : T_PPI_pat <S2_asl_i_p_or, int_hexagon_S2_asl_i_p_or>;
+def : T_PPI_pat <S2_lsr_i_p_xacc, int_hexagon_S2_lsr_i_p_xacc>;
+def : T_PPI_pat <S2_asl_i_p_xacc, int_hexagon_S2_asl_i_p_xacc>;
+
+def : T_RRR_pat <S2_asr_r_r_nac, int_hexagon_S2_asr_r_r_nac>;
+def : T_RRR_pat <S2_lsr_r_r_nac, int_hexagon_S2_lsr_r_r_nac>;
+def : T_RRR_pat <S2_asl_r_r_nac, int_hexagon_S2_asl_r_r_nac>;
+def : T_RRR_pat <S2_lsl_r_r_nac, int_hexagon_S2_lsl_r_r_nac>;
+def : T_RRR_pat <S2_asr_r_r_acc, int_hexagon_S2_asr_r_r_acc>;
+def : T_RRR_pat <S2_lsr_r_r_acc, int_hexagon_S2_lsr_r_r_acc>;
+def : T_RRR_pat <S2_asl_r_r_acc, int_hexagon_S2_asl_r_r_acc>;
+def : T_RRR_pat <S2_lsl_r_r_acc, int_hexagon_S2_lsl_r_r_acc>;
+
+def : T_RRR_pat <S2_asr_r_r_and, int_hexagon_S2_asr_r_r_and>;
+def : T_RRR_pat <S2_lsr_r_r_and, int_hexagon_S2_lsr_r_r_and>;
+def : T_RRR_pat <S2_asl_r_r_and, int_hexagon_S2_asl_r_r_and>;
+def : T_RRR_pat <S2_lsl_r_r_and, int_hexagon_S2_lsl_r_r_and>;
+def : T_RRR_pat <S2_asr_r_r_or, int_hexagon_S2_asr_r_r_or>;
+def : T_RRR_pat <S2_lsr_r_r_or, int_hexagon_S2_lsr_r_r_or>;
+def : T_RRR_pat <S2_asl_r_r_or, int_hexagon_S2_asl_r_r_or>;
+def : T_RRR_pat <S2_lsl_r_r_or, int_hexagon_S2_lsl_r_r_or>;
+
+def : T_PPR_pat <S2_asr_r_p_nac, int_hexagon_S2_asr_r_p_nac>;
+def : T_PPR_pat <S2_lsr_r_p_nac, int_hexagon_S2_lsr_r_p_nac>;
+def : T_PPR_pat <S2_asl_r_p_nac, int_hexagon_S2_asl_r_p_nac>;
+def : T_PPR_pat <S2_lsl_r_p_nac, int_hexagon_S2_lsl_r_p_nac>;
+def : T_PPR_pat <S2_asr_r_p_acc, int_hexagon_S2_asr_r_p_acc>;
+def : T_PPR_pat <S2_lsr_r_p_acc, int_hexagon_S2_lsr_r_p_acc>;
+def : T_PPR_pat <S2_asl_r_p_acc, int_hexagon_S2_asl_r_p_acc>;
+def : T_PPR_pat <S2_lsl_r_p_acc, int_hexagon_S2_lsl_r_p_acc>;
+
+def : T_PPR_pat <S2_asr_r_p_and, int_hexagon_S2_asr_r_p_and>;
+def : T_PPR_pat <S2_lsr_r_p_and, int_hexagon_S2_lsr_r_p_and>;
+def : T_PPR_pat <S2_asl_r_p_and, int_hexagon_S2_asl_r_p_and>;
+def : T_PPR_pat <S2_lsl_r_p_and, int_hexagon_S2_lsl_r_p_and>;
+def : T_PPR_pat <S2_asr_r_p_or, int_hexagon_S2_asr_r_p_or>;
+def : T_PPR_pat <S2_lsr_r_p_or, int_hexagon_S2_lsr_r_p_or>;
+def : T_PPR_pat <S2_asl_r_p_or, int_hexagon_S2_asl_r_p_or>;
+def : T_PPR_pat <S2_lsl_r_p_or, int_hexagon_S2_lsl_r_p_or>;
+
/********************************************************************
* ALU32/ALU *
*********************************************************************/
@@ -409,6 +566,40 @@ def : T_RRI_pat <M2_naccii, int_hexagon_
// XOR and XOR with destination
def : T_RRR_pat <M2_xor_xacc, int_hexagon_M2_xor_xacc>;
+// Shift by immediate and add
+def : T_RRI_pat<S2_addasl_rrri, int_hexagon_S2_addasl_rrri>;
+
+/********************************************************************
+* STYPE/SHIFT *
+*********************************************************************/
+
+def : T_PI_pat <S2_asr_i_p, int_hexagon_S2_asr_i_p>;
+def : T_PI_pat <S2_lsr_i_p, int_hexagon_S2_lsr_i_p>;
+def : T_PI_pat <S2_asl_i_p, int_hexagon_S2_asl_i_p>;
+
+def : T_PR_pat <S2_asr_r_p, int_hexagon_S2_asr_r_p>;
+def : T_PR_pat <S2_lsr_r_p, int_hexagon_S2_lsr_r_p>;
+def : T_PR_pat <S2_asl_r_p, int_hexagon_S2_asl_r_p>;
+def : T_PR_pat <S2_lsl_r_p, int_hexagon_S2_lsl_r_p>;
+
+def : T_RR_pat <S2_asr_r_r, int_hexagon_S2_asr_r_r>;
+def : T_RR_pat <S2_lsr_r_r, int_hexagon_S2_lsr_r_r>;
+def : T_RR_pat <S2_asl_r_r, int_hexagon_S2_asl_r_r>;
+def : T_RR_pat <S2_lsl_r_r, int_hexagon_S2_lsl_r_r>;
+
+def : T_RR_pat <S2_asr_r_r_sat, int_hexagon_S2_asr_r_r_sat>;
+def : T_RR_pat <S2_asl_r_r_sat, int_hexagon_S2_asl_r_r_sat>;
+
+def : T_RI_pat <S2_asr_i_r, int_hexagon_S2_asr_i_r>;
+def : T_RI_pat <S2_lsr_i_r, int_hexagon_S2_lsr_i_r>;
+def : T_RI_pat <S2_asl_i_r, int_hexagon_S2_asl_i_r>;
+def : T_RI_pat <S2_asr_i_r_rnd, int_hexagon_S2_asr_i_r_rnd>;
+def : T_RI_pat <S2_asr_i_r_rnd_goodsyntax,
+ int_hexagon_S2_asr_i_r_rnd_goodsyntax>;
+
+// Shift left by immediate with saturation.
+def : T_RI_pat <S2_asl_i_r_sat, int_hexagon_S2_asl_i_r_sat>;
+
//
// ALU 32 types.
//
@@ -3144,200 +3335,6 @@ def HEXAGON_C2_tfrrp:
def HEXAGON_C2_vitpack:
si_SInst_qiqi <"vitpack",int_hexagon_C2_vitpack>;
-
-/********************************************************************
-* STYPE/SHIFT *
-*********************************************************************/
-
-// STYPE / SHIFT / Shift by immediate.
-def HEXAGON_S2_asl_i_r:
- si_SInst_siu5 <"asl", int_hexagon_S2_asl_i_r>;
-def HEXAGON_S2_asr_i_r:
- si_SInst_siu5 <"asr", int_hexagon_S2_asr_i_r>;
-def HEXAGON_S2_lsr_i_r:
- si_SInst_siu5 <"lsr", int_hexagon_S2_lsr_i_r>;
-def HEXAGON_S2_asl_i_p:
- di_SInst_diu6 <"asl", int_hexagon_S2_asl_i_p>;
-def HEXAGON_S2_asr_i_p:
- di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p>;
-def HEXAGON_S2_lsr_i_p:
- di_SInst_diu6 <"lsr", int_hexagon_S2_lsr_i_p>;
-
-// STYPE / SHIFT / Shift by immediate and accumulate.
-def HEXAGON_S2_asl_i_r_acc:
- si_SInst_sisiu5_acc <"asl", int_hexagon_S2_asl_i_r_acc>;
-def HEXAGON_S2_asr_i_r_acc:
- si_SInst_sisiu5_acc <"asr", int_hexagon_S2_asr_i_r_acc>;
-def HEXAGON_S2_lsr_i_r_acc:
- si_SInst_sisiu5_acc <"lsr", int_hexagon_S2_lsr_i_r_acc>;
-def HEXAGON_S2_asl_i_r_nac:
- si_SInst_sisiu5_nac <"asl", int_hexagon_S2_asl_i_r_nac>;
-def HEXAGON_S2_asr_i_r_nac:
- si_SInst_sisiu5_nac <"asr", int_hexagon_S2_asr_i_r_nac>;
-def HEXAGON_S2_lsr_i_r_nac:
- si_SInst_sisiu5_nac <"lsr", int_hexagon_S2_lsr_i_r_nac>;
-def HEXAGON_S2_asl_i_p_acc:
- di_SInst_didiu6_acc <"asl", int_hexagon_S2_asl_i_p_acc>;
-def HEXAGON_S2_asr_i_p_acc:
- di_SInst_didiu6_acc <"asr", int_hexagon_S2_asr_i_p_acc>;
-def HEXAGON_S2_lsr_i_p_acc:
- di_SInst_didiu6_acc <"lsr", int_hexagon_S2_lsr_i_p_acc>;
-def HEXAGON_S2_asl_i_p_nac:
- di_SInst_didiu6_nac <"asl", int_hexagon_S2_asl_i_p_nac>;
-def HEXAGON_S2_asr_i_p_nac:
- di_SInst_didiu6_nac <"asr", int_hexagon_S2_asr_i_p_nac>;
-def HEXAGON_S2_lsr_i_p_nac:
- di_SInst_didiu6_nac <"lsr", int_hexagon_S2_lsr_i_p_nac>;
-
-// STYPE / SHIFT / Shift by immediate and add.
-def HEXAGON_S2_addasl_rrri:
- si_SInst_sisiu3 <"addasl", int_hexagon_S2_addasl_rrri>;
-
-// STYPE / SHIFT / Shift by immediate and logical.
-def HEXAGON_S2_asl_i_r_and:
- si_SInst_sisiu5_and <"asl", int_hexagon_S2_asl_i_r_and>;
-def HEXAGON_S2_asr_i_r_and:
- si_SInst_sisiu5_and <"asr", int_hexagon_S2_asr_i_r_and>;
-def HEXAGON_S2_lsr_i_r_and:
- si_SInst_sisiu5_and <"lsr", int_hexagon_S2_lsr_i_r_and>;
-
-def HEXAGON_S2_asl_i_r_xacc:
- si_SInst_sisiu5_xor <"asl", int_hexagon_S2_asl_i_r_xacc>;
-def HEXAGON_S2_lsr_i_r_xacc:
- si_SInst_sisiu5_xor <"lsr", int_hexagon_S2_lsr_i_r_xacc>;
-
-def HEXAGON_S2_asl_i_r_or:
- si_SInst_sisiu5_or <"asl", int_hexagon_S2_asl_i_r_or>;
-def HEXAGON_S2_asr_i_r_or:
- si_SInst_sisiu5_or <"asr", int_hexagon_S2_asr_i_r_or>;
-def HEXAGON_S2_lsr_i_r_or:
- si_SInst_sisiu5_or <"lsr", int_hexagon_S2_lsr_i_r_or>;
-
-def HEXAGON_S2_asl_i_p_and:
- di_SInst_didiu6_and <"asl", int_hexagon_S2_asl_i_p_and>;
-def HEXAGON_S2_asr_i_p_and:
- di_SInst_didiu6_and <"asr", int_hexagon_S2_asr_i_p_and>;
-def HEXAGON_S2_lsr_i_p_and:
- di_SInst_didiu6_and <"lsr", int_hexagon_S2_lsr_i_p_and>;
-
-def HEXAGON_S2_asl_i_p_xacc:
- di_SInst_didiu6_xor <"asl", int_hexagon_S2_asl_i_p_xacc>;
-def HEXAGON_S2_lsr_i_p_xacc:
- di_SInst_didiu6_xor <"lsr", int_hexagon_S2_lsr_i_p_xacc>;
-
-def HEXAGON_S2_asl_i_p_or:
- di_SInst_didiu6_or <"asl", int_hexagon_S2_asl_i_p_or>;
-def HEXAGON_S2_asr_i_p_or:
- di_SInst_didiu6_or <"asr", int_hexagon_S2_asr_i_p_or>;
-def HEXAGON_S2_lsr_i_p_or:
- di_SInst_didiu6_or <"lsr", int_hexagon_S2_lsr_i_p_or>;
-
-// STYPE / SHIFT / Shift right by immediate with rounding.
-def HEXAGON_S2_asr_i_r_rnd:
- si_SInst_siu5_rnd <"asr", int_hexagon_S2_asr_i_r_rnd>;
-def HEXAGON_S2_asr_i_r_rnd_goodsyntax:
- si_SInst_siu5 <"asrrnd", int_hexagon_S2_asr_i_r_rnd_goodsyntax>;
-
-// STYPE / SHIFT / Shift left by immediate with saturation.
-def HEXAGON_S2_asl_i_r_sat:
- si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_i_r_sat>;
-
-// STYPE / SHIFT / Shift by register.
-def HEXAGON_S2_asl_r_r:
- si_SInst_sisi <"asl", int_hexagon_S2_asl_r_r>;
-def HEXAGON_S2_asr_r_r:
- si_SInst_sisi <"asr", int_hexagon_S2_asr_r_r>;
-def HEXAGON_S2_lsl_r_r:
- si_SInst_sisi <"lsl", int_hexagon_S2_lsl_r_r>;
-def HEXAGON_S2_lsr_r_r:
- si_SInst_sisi <"lsr", int_hexagon_S2_lsr_r_r>;
-def HEXAGON_S2_asl_r_p:
- di_SInst_disi <"asl", int_hexagon_S2_asl_r_p>;
-def HEXAGON_S2_asr_r_p:
- di_SInst_disi <"asr", int_hexagon_S2_asr_r_p>;
-def HEXAGON_S2_lsl_r_p:
- di_SInst_disi <"lsl", int_hexagon_S2_lsl_r_p>;
-def HEXAGON_S2_lsr_r_p:
- di_SInst_disi <"lsr", int_hexagon_S2_lsr_r_p>;
-
-// STYPE / SHIFT / Shift by register and accumulate.
-def HEXAGON_S2_asl_r_r_acc:
- si_SInst_sisisi_acc <"asl", int_hexagon_S2_asl_r_r_acc>;
-def HEXAGON_S2_asr_r_r_acc:
- si_SInst_sisisi_acc <"asr", int_hexagon_S2_asr_r_r_acc>;
-def HEXAGON_S2_lsl_r_r_acc:
- si_SInst_sisisi_acc <"lsl", int_hexagon_S2_lsl_r_r_acc>;
-def HEXAGON_S2_lsr_r_r_acc:
- si_SInst_sisisi_acc <"lsr", int_hexagon_S2_lsr_r_r_acc>;
-def HEXAGON_S2_asl_r_p_acc:
- di_SInst_didisi_acc <"asl", int_hexagon_S2_asl_r_p_acc>;
-def HEXAGON_S2_asr_r_p_acc:
- di_SInst_didisi_acc <"asr", int_hexagon_S2_asr_r_p_acc>;
-def HEXAGON_S2_lsl_r_p_acc:
- di_SInst_didisi_acc <"lsl", int_hexagon_S2_lsl_r_p_acc>;
-def HEXAGON_S2_lsr_r_p_acc:
- di_SInst_didisi_acc <"lsr", int_hexagon_S2_lsr_r_p_acc>;
-
-def HEXAGON_S2_asl_r_r_nac:
- si_SInst_sisisi_nac <"asl", int_hexagon_S2_asl_r_r_nac>;
-def HEXAGON_S2_asr_r_r_nac:
- si_SInst_sisisi_nac <"asr", int_hexagon_S2_asr_r_r_nac>;
-def HEXAGON_S2_lsl_r_r_nac:
- si_SInst_sisisi_nac <"lsl", int_hexagon_S2_lsl_r_r_nac>;
-def HEXAGON_S2_lsr_r_r_nac:
- si_SInst_sisisi_nac <"lsr", int_hexagon_S2_lsr_r_r_nac>;
-def HEXAGON_S2_asl_r_p_nac:
- di_SInst_didisi_nac <"asl", int_hexagon_S2_asl_r_p_nac>;
-def HEXAGON_S2_asr_r_p_nac:
- di_SInst_didisi_nac <"asr", int_hexagon_S2_asr_r_p_nac>;
-def HEXAGON_S2_lsl_r_p_nac:
- di_SInst_didisi_nac <"lsl", int_hexagon_S2_lsl_r_p_nac>;
-def HEXAGON_S2_lsr_r_p_nac:
- di_SInst_didisi_nac <"lsr", int_hexagon_S2_lsr_r_p_nac>;
-
-// STYPE / SHIFT / Shift by register and logical.
-def HEXAGON_S2_asl_r_r_and:
- si_SInst_sisisi_and <"asl", int_hexagon_S2_asl_r_r_and>;
-def HEXAGON_S2_asr_r_r_and:
- si_SInst_sisisi_and <"asr", int_hexagon_S2_asr_r_r_and>;
-def HEXAGON_S2_lsl_r_r_and:
- si_SInst_sisisi_and <"lsl", int_hexagon_S2_lsl_r_r_and>;
-def HEXAGON_S2_lsr_r_r_and:
- si_SInst_sisisi_and <"lsr", int_hexagon_S2_lsr_r_r_and>;
-
-def HEXAGON_S2_asl_r_r_or:
- si_SInst_sisisi_or <"asl", int_hexagon_S2_asl_r_r_or>;
-def HEXAGON_S2_asr_r_r_or:
- si_SInst_sisisi_or <"asr", int_hexagon_S2_asr_r_r_or>;
-def HEXAGON_S2_lsl_r_r_or:
- si_SInst_sisisi_or <"lsl", int_hexagon_S2_lsl_r_r_or>;
-def HEXAGON_S2_lsr_r_r_or:
- si_SInst_sisisi_or <"lsr", int_hexagon_S2_lsr_r_r_or>;
-
-def HEXAGON_S2_asl_r_p_and:
- di_SInst_didisi_and <"asl", int_hexagon_S2_asl_r_p_and>;
-def HEXAGON_S2_asr_r_p_and:
- di_SInst_didisi_and <"asr", int_hexagon_S2_asr_r_p_and>;
-def HEXAGON_S2_lsl_r_p_and:
- di_SInst_didisi_and <"lsl", int_hexagon_S2_lsl_r_p_and>;
-def HEXAGON_S2_lsr_r_p_and:
- di_SInst_didisi_and <"lsr", int_hexagon_S2_lsr_r_p_and>;
-
-def HEXAGON_S2_asl_r_p_or:
- di_SInst_didisi_or <"asl", int_hexagon_S2_asl_r_p_or>;
-def HEXAGON_S2_asr_r_p_or:
- di_SInst_didisi_or <"asr", int_hexagon_S2_asr_r_p_or>;
-def HEXAGON_S2_lsl_r_p_or:
- di_SInst_didisi_or <"lsl", int_hexagon_S2_lsl_r_p_or>;
-def HEXAGON_S2_lsr_r_p_or:
- di_SInst_didisi_or <"lsr", int_hexagon_S2_lsr_r_p_or>;
-
-// STYPE / SHIFT / Shift by register with saturation.
-def HEXAGON_S2_asl_r_r_sat:
- si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_r_r_sat>;
-def HEXAGON_S2_asr_r_r_sat:
- si_SInst_sisi_sat <"asr", int_hexagon_S2_asr_r_r_sat>;
-
// STYPE / SHIFT / Table Index.
def Hexagon_S2_tableidxb_goodsyntax:
si_MInst_sisiu4u5 <"tableidxb",int_hexagon_S2_tableidxb_goodsyntax>;
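In the T_*_pat class names, each letter describes one intrinsic operand: R is a 32-bit register (I32/IntRegs), P a 64-bit register pair (I64/DoubleRegs), and I an immediate. T_PPI_pat, for example, matches an intrinsic taking two register pairs and an immediate, as in this sketch modeled on the new CodeGen tests (hypothetical function name, nonzero immediate):

    declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
    define i64 @ppi_example(i64 %a, i64 %b) {
      %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 2)
      ret i64 %z
    }
    ; CHECK: r1:0 += asr(r3:2, #2)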
Modified: llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td?rev=227330&r1=227329&r2=227330&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td Wed Jan 28 11:37:59 2015
@@ -12,6 +12,18 @@
// 80-V9418-12 Rev. A
// June 15, 2010
+// Shift an immediate left by register amount
+def : T_IR_pat<S4_lsli, int_hexagon_S4_lsli>;
+
+// Shift and add/sub/and/or
+def : T_IRI_pat <S4_andi_asl_ri, int_hexagon_S4_andi_asl_ri>;
+def : T_IRI_pat <S4_ori_asl_ri, int_hexagon_S4_ori_asl_ri>;
+def : T_IRI_pat <S4_addi_asl_ri, int_hexagon_S4_addi_asl_ri>;
+def : T_IRI_pat <S4_subi_asl_ri, int_hexagon_S4_subi_asl_ri>;
+def : T_IRI_pat <S4_andi_lsr_ri, int_hexagon_S4_andi_lsr_ri>;
+def : T_IRI_pat <S4_ori_lsr_ri, int_hexagon_S4_ori_lsr_ri>;
+def : T_IRI_pat <S4_addi_lsr_ri, int_hexagon_S4_addi_lsr_ri>;
+def : T_IRI_pat <S4_subi_lsr_ri, int_hexagon_S4_subi_lsr_ri>;
//
// ALU 32 types.
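The S4 shift-and-op forms fold an immediate add/sub/and/or around an immediate shift. Sketching one in the style of the new tests (hypothetical function name; immediates chosen to fit the instruction's immediate fields):

    declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32)
    define i32 @addi_asl_example(i32 %a) {
      %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 7, i32 %a, i32 2)
      ret i32 %z
    }
    ; CHECK: r0 = add(#7, asl(r0, #2))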
Modified: llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV5.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV5.td?rev=227330&r1=227329&r2=227330&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV5.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV5.td Wed Jan 28 11:37:59 2015
@@ -1,3 +1,16 @@
+//===- HexagonIntrinsicsV5.td - V5 Instruction intrinsics --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def : T_PI_pat <S2_asr_i_p_rnd, int_hexagon_S2_asr_i_p_rnd>;
+def : T_PI_pat <S2_asr_i_p_rnd_goodsyntax,
+ int_hexagon_S2_asr_i_p_rnd_goodsyntax>;
+
class sf_SInst_sf<string opc, Intrinsic IntID>
: SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
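With these patterns in place, the goodsyntax rounding-shift intrinsic can be selected directly. A hedged sketch; the expected output follows the asm string "$dst = asrrnd($src1, #$src2)" added in HexagonInstrInfoV5.td above:

    declare i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64, i32)
    define i64 @asrrnd_example(i64 %a) {
      %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64 %a, i32 4)
      ret i64 %z
    }
    ; expected, per the asm string above: r1:0 = asrrnd(r1:0, #4)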
Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll?rev=227330&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll Wed Jan 28 11:37:59 2015
@@ -0,0 +1,635 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.8 XTYPE/SHIFT
+
+; Shift by immediate
+declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32)
+define i64 @S2_asr_i_p(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32)
+define i64 @S2_lsr_i_p(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = lsr(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32)
+define i64 @S2_asl_i_p(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = asl(r1:0, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32)
+define i32 @S2_asr_i_r(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32)
+define i32 @S2_lsr_i_r(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = lsr(r0, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
+define i32 @S2_asl_i_r(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, #0)
+
+; Shift by immediate and accumulate
+declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32)
+define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32)
+define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 -= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32)
+define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
+define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32)
+define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32)
+define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += asl(r3:2, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32)
+define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32)
+define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32)
+define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32)
+define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32)
+define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32)
+define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += asl(r1, #0)
+
+; Shift by immediate and add
+declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32)
+define i32 @S4_addi_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32)
+define i32 @S4_subi_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = sub(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32)
+define i32 @S4_addi_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, lsr(r0, #0))
+
+declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32)
+define i32 @S4_subi_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = sub(#0, lsr(r0, #0))
+
+declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32)
+define i32 @S2_addasl_rrri(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = addasl(r0, r1, #0)
+
+; Shift by immediate and logical
+declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32)
+define i64 @S2_asr_i_p_and(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32)
+define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 &= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32)
+define i64 @S2_asl_i_p_and(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32)
+define i64 @S2_asr_i_p_or(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32)
+define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 |= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32)
+define i64 @S2_asl_i_p_or(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32)
+define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32)
+define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= asl(r3:2, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32)
+define i32 @S2_asr_i_r_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 &= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32)
+define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 &= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32)
+define i32 @S2_asl_i_r_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 &= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32)
+define i32 @S2_asr_i_r_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32)
+define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32)
+define i32 @S2_asl_i_r_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32)
+define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 ^= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32)
+define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 ^= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32)
+define i32 @S4_andi_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = and(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32)
+define i32 @S4_ori_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = or(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32)
+define i32 @S4_andi_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = and(#0, lsr(r0, #0))
+
+declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32)
+define i32 @S4_ori_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = or(#0, lsr(r0, #0))
+
+; Shift right by immediate with rounding
+declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32)
+define i64 @S2_asr_i_p_rnd(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, #0):rnd
+
+declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32)
+define i32 @S2_asr_i_r_rnd(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, #0):rnd
+
+; Shift left by immediate with saturation
+declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32)
+define i32 @S2_asl_i_r_sat(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, #0):sat
+
+; Shift by register
+declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32)
+define i64 @S2_asr_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32)
+define i64 @S2_lsr_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = lsr(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32)
+define i64 @S2_asl_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = asl(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32)
+define i64 @S2_lsl_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = lsl(r1:0, r2)
+
+declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32)
+define i32 @S2_asr_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, r1)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32)
+define i32 @S2_lsr_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = lsr(r0, r1)
+
+declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32)
+define i32 @S2_asl_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, r1)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32)
+define i32 @S2_lsl_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = lsl(r0, r1)
+
+declare i32 @llvm.hexagon.S4.lsli(i32, i32)
+define i32 @S4_lsli(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = lsl(#0, r0)
+
+; Shift by register and accumulate
+declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32)
+define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32)
+define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32)
+define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32)
+define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= lsl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32)
+define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32)
+define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32)
+define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32)
+define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += lsl(r3:2, r4)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32)
+define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32)
+define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32)
+define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32)
+define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= lsl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32)
+define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32)
+define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32)
+define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32)
+define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += lsl(r1, r2)
+
+; Shift by register and logical
+declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32)
+define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32)
+define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32)
+define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32)
+define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= lsl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32)
+define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32)
+define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32)
+define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32)
+define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= lsl(r3:2, r4)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32)
+define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32)
+define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32)
+define i32 @S2_asl_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32)
+define i32 @S2_lsl_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= lsl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32)
+define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32)
+define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32)
+define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32)
+define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= lsl(r1, r2)
+
+; Shift by register with saturation
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32)
+define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, r1):sat
+
+declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32)
+define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, r1):sat
Modified: llvm/trunk/test/MC/Disassembler/Hexagon/xtype_shift.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/Disassembler/Hexagon/xtype_shift.txt?rev=227330&r1=227329&r2=227330&view=diff
==============================================================================
--- llvm/trunk/test/MC/Disassembler/Hexagon/xtype_shift.txt (original)
+++ llvm/trunk/test/MC/Disassembler/Hexagon/xtype_shift.txt Wed Jan 28 11:37:59 2015
@@ -70,6 +70,10 @@
# CHECK: r17 = and(#21, lsr(r17, #31))
0x5a 0xff 0x11 0xde
# CHECK: r17 = or(#21, lsr(r17, #31))
+0xf0 0xdf 0xd4 0x80
+# CHECK: r17:16 = asr(r21:20, #31):rnd
+0x11 0xdf 0x55 0x8c
+# CHECK: r17 = asr(r21, #31):rnd
0x11 0xdf 0x55 0x8e
# CHECK: r17 &= asr(r21, #31)
0x31 0xdf 0x55 0x8e