[llvm] r227335 - [Hexagon] Converting XTYPE/BIT intrinsic patterns and adding tests.
Colin LeMahieu
colinl at codeaurora.org
Wed Jan 28 10:06:23 PST 2015
Author: colinl
Date: Wed Jan 28 12:06:23 2015
New Revision: 227335
URL: http://llvm.org/viewvc/llvm-project?rev=227335&view=rev
Log:
[Hexagon] Converting XTYPE/BIT intrinsic patterns and adding tests.
Added:
llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
Modified:
llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td
llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td
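With these patterns in place, a call to one of the XTYPE/BIT intrinsics selects
directly to the corresponding Hexagon instruction. As a minimal sketch of the
shape the new test exercises (this mirrors the setbit case in xtype_bit.ll
below; the function name here is only illustrative):

  ; Compiled with: llc -march=hexagon -O0
  declare i32 @llvm.hexagon.S2.setbit.i(i32, i32)
  define i32 @set_bit_zero(i32 %a) {
    %r = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0)
    ret i32 %r
  }
  ; Expected to emit: r0 = setbit(r0, #0)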
Modified: llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td?rev=227335&r1=227334&r2=227335&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td Wed Jan 28 12:06:23 2015
@@ -21,6 +21,10 @@ class T_R_pat <InstHexagon MI, Intrinsic
: Pat <(IntID I32:$Rs),
(MI I32:$Rs)>;
+class T_P_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs),
+ (MI DoubleRegs:$Rs)>;
+
class T_II_pat <InstHexagon MI, Intrinsic IntID, PatFrag Imm1, PatFrag Imm2>
: Pat<(IntID Imm1:$Is, Imm2:$It),
(MI Imm1:$Is, Imm2:$It)>;
@@ -37,6 +41,10 @@ class T_PI_pat <InstHexagon MI, Intrinsi
: Pat<(IntID I64:$Rs, imm:$It),
(MI DoubleRegs:$Rs, imm:$It)>;
+class T_RP_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID I32:$Rs, I64:$Rt),
+ (MI I32:$Rs, DoubleRegs:$Rt)>;
+
class T_RR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I32:$Rs, I32:$Rt),
(MI I32:$Rs, I32:$Rt)>;
@@ -61,6 +69,10 @@ class T_RRI_pat <InstHexagon MI, Intrins
: Pat <(IntID I32:$Rs, I32:$Rt, imm:$Iu),
(MI I32:$Rs, I32:$Rt, imm:$Iu)>;
+class T_RII_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs, imm:$It, imm:$Iu),
+ (MI I32:$Rs, imm:$It, imm:$Iu)>;
+
class T_IRI_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID imm:$It, I32:$Rs, imm:$Iu),
(MI imm:$It, I32:$Rs, imm:$Iu)>;
@@ -73,6 +85,10 @@ class T_PPI_pat <InstHexagon MI, Intrins
: Pat <(IntID I64:$Rs, I64:$Rt, imm:$Iu),
(MI DoubleRegs:$Rs, DoubleRegs:$Rt, imm:$Iu)>;
+class T_PII_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, imm:$It, imm:$Iu),
+ (MI DoubleRegs:$Rs, imm:$It, imm:$Iu)>;
+
class T_PPR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs, I64:$Rt, I32:$Ru),
(MI DoubleRegs:$Rs, DoubleRegs:$Rt, I32:$Ru)>;
@@ -549,6 +565,17 @@ def: T_PP_pat<A2_xorp, int_hexagon_A
def: T_PP_pat<S2_parityp, int_hexagon_S2_parityp>;
def: T_RR_pat<S2_packhl, int_hexagon_S2_packhl>;
+// MPY - Multiply and use full result
+// Rdd = mpy[u](Rs, Rt)
+def : T_RR_pat <M2_dpmpyss_s0, int_hexagon_M2_dpmpyss_s0>;
+def : T_RR_pat <M2_dpmpyuu_s0, int_hexagon_M2_dpmpyuu_s0>;
+
+// Rxx[+-]= mpy[u](Rs,Rt)
+def : T_PRR_pat <M2_dpmpyss_acc_s0, int_hexagon_M2_dpmpyss_acc_s0>;
+def : T_PRR_pat <M2_dpmpyss_nac_s0, int_hexagon_M2_dpmpyss_nac_s0>;
+def : T_PRR_pat <M2_dpmpyuu_acc_s0, int_hexagon_M2_dpmpyuu_acc_s0>;
+def : T_PRR_pat <M2_dpmpyuu_nac_s0, int_hexagon_M2_dpmpyuu_nac_s0>;
+
// Multiply 32x32 and use lower result
def : T_RRI_pat <M2_macsip, int_hexagon_M2_macsip>;
def : T_RRI_pat <M2_macsin, int_hexagon_M2_macsin>;
@@ -566,9 +593,98 @@ def : T_RRI_pat <M2_naccii, int_hexagon_
// XOR and XOR with destination
def : T_RRR_pat <M2_xor_xacc, int_hexagon_M2_xor_xacc>;
+class MType_R32_pat <Intrinsic IntID, InstHexagon OutputInst> :
+ Pat <(IntID IntRegs:$src1, IntRegs:$src2),
+ (OutputInst IntRegs:$src1, IntRegs:$src2)>;
+
+// Multiply and use lower result
+def : MType_R32_pat <int_hexagon_M2_mpyi, M2_mpyi>;
+def : T_RI_pat<M2_mpysmi, int_hexagon_M2_mpysmi>;
+
+// Assembler mapped from Rd32=mpyui(Rs32,Rt32) to Rd32=mpyi(Rs32,Rt32)
+def : MType_R32_pat <int_hexagon_M2_mpyui, M2_mpyi>;
+
+// Multiply and use upper result
+def : MType_R32_pat <int_hexagon_M2_mpy_up, M2_mpy_up>;
+def : MType_R32_pat <int_hexagon_M2_mpyu_up, M2_mpyu_up>;
+def : MType_R32_pat <int_hexagon_M2_hmmpyh_rs1, M2_hmmpyh_rs1>;
+def : MType_R32_pat <int_hexagon_M2_hmmpyl_rs1, M2_hmmpyl_rs1>;
+def : MType_R32_pat <int_hexagon_M2_dpmpyss_rnd_s0, M2_dpmpyss_rnd_s0>;
+
+/********************************************************************
+* STYPE/ALU *
+*********************************************************************/
+def : T_P_pat <A2_absp, int_hexagon_A2_absp>;
+def : T_P_pat <A2_negp, int_hexagon_A2_negp>;
+def : T_P_pat <A2_notp, int_hexagon_A2_notp>;
+
+/********************************************************************
+* STYPE/BIT *
+*********************************************************************/
+
+// Count leading/trailing
+def: T_R_pat<S2_cl0, int_hexagon_S2_cl0>;
+def: T_P_pat<S2_cl0p, int_hexagon_S2_cl0p>;
+def: T_R_pat<S2_cl1, int_hexagon_S2_cl1>;
+def: T_P_pat<S2_cl1p, int_hexagon_S2_cl1p>;
+def: T_R_pat<S2_clb, int_hexagon_S2_clb>;
+def: T_P_pat<S2_clbp, int_hexagon_S2_clbp>;
+def: T_R_pat<S2_clbnorm, int_hexagon_S2_clbnorm>;
+def: T_R_pat<S2_ct0, int_hexagon_S2_ct0>;
+def: T_R_pat<S2_ct1, int_hexagon_S2_ct1>;
+
+// Compare bit mask
+def: T_RR_pat<C2_bitsclr, int_hexagon_C2_bitsclr>;
+def: T_RI_pat<C2_bitsclri, int_hexagon_C2_bitsclri>;
+def: T_RR_pat<C2_bitsset, int_hexagon_C2_bitsset>;
+
// Shift by immediate and add
def : T_RRI_pat<S2_addasl_rrri, int_hexagon_S2_addasl_rrri>;
+// Extract bitfield
+def : T_PII_pat<S2_extractup, int_hexagon_S2_extractup>;
+def : T_RII_pat<S2_extractu, int_hexagon_S2_extractu>;
+def : T_RP_pat <S2_extractu_rp, int_hexagon_S2_extractu_rp>;
+def : T_PP_pat <S2_extractup_rp, int_hexagon_S2_extractup_rp>;
+
+// Insert bitfield
+def : Pat <(int_hexagon_S2_insert_rp IntRegs:$src1, IntRegs:$src2,
+ DoubleRegs:$src3),
+ (S2_insert_rp IntRegs:$src1, IntRegs:$src2, DoubleRegs:$src3)>;
+
+def : Pat<(i64 (int_hexagon_S2_insertp_rp (I64:$src1),
+ (I64:$src2), (I64:$src3))),
+ (i64 (S2_insertp_rp (I64:$src1), (I64:$src2),
+ (I64:$src3)))>;
+
+def : Pat<(int_hexagon_S2_insert IntRegs:$src1, IntRegs:$src2,
+ u5ImmPred:$src3, u5ImmPred:$src4),
+ (S2_insert IntRegs:$src1, IntRegs:$src2,
+ u5ImmPred:$src3, u5ImmPred:$src4)>;
+
+def : Pat<(i64 (int_hexagon_S2_insertp (I64:$src1),
+ (I64:$src2), u6ImmPred:$src3, u6ImmPred:$src4)),
+ (i64 (S2_insertp (I64:$src1), (I64:$src2),
+ u6ImmPred:$src3, u6ImmPred:$src4))>;
+
+
+// Interleave/deinterleave
+def : T_P_pat <S2_interleave, int_hexagon_S2_interleave>;
+def : T_P_pat <S2_deinterleave, int_hexagon_S2_deinterleave>;
+
+// Set/Clear/Toggle Bit
+def: T_RI_pat<S2_setbit_i, int_hexagon_S2_setbit_i>;
+def: T_RI_pat<S2_clrbit_i, int_hexagon_S2_clrbit_i>;
+def: T_RI_pat<S2_togglebit_i, int_hexagon_S2_togglebit_i>;
+
+def: T_RR_pat<S2_setbit_r, int_hexagon_S2_setbit_r>;
+def: T_RR_pat<S2_clrbit_r, int_hexagon_S2_clrbit_r>;
+def: T_RR_pat<S2_togglebit_r, int_hexagon_S2_togglebit_r>;
+
+// Test Bit
+def: T_RI_pat<S2_tstbit_i, int_hexagon_S2_tstbit_i>;
+def: T_RR_pat<S2_tstbit_r, int_hexagon_S2_tstbit_r>;
+
/********************************************************************
* STYPE/SHIFT *
*********************************************************************/
@@ -2908,8 +3024,6 @@ def Hexagon_M2_mpysmi:
si_MInst_sis9 <"mpyi", int_hexagon_M2_mpysmi>;
def HEXAGON_M2_mpyi:
si_MInst_sisi <"mpyi", int_hexagon_M2_mpyi>;
-def HEXAGON_M2_mpyui:
- si_MInst_sisi <"mpyui", int_hexagon_M2_mpyui>;
def HEXAGON_M2_macsip:
si_MInst_sisiu8_acc <"mpyi", int_hexagon_M2_macsip>;
def HEXAGON_M2_maci:
@@ -2989,32 +3103,6 @@ def HEXAGON_M2_mmacuhs_rs0:
def HEXAGON_M2_mmacuhs_s0:
di_MInst_dididi_acc_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s0>;
-// MTYPE / MPYH / Multiply and use upper result.
-def HEXAGON_M2_hmmpyh_rs1:
- si_MInst_sisi_h_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyh_rs1>;
-def HEXAGON_M2_hmmpyl_rs1:
- si_MInst_sisi_l_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyl_rs1>;
-def HEXAGON_M2_mpy_up:
- si_MInst_sisi <"mpy", int_hexagon_M2_mpy_up>;
-def HEXAGON_M2_dpmpyss_rnd_s0:
- si_MInst_sisi_rnd <"mpy", int_hexagon_M2_dpmpyss_rnd_s0>;
-def HEXAGON_M2_mpyu_up:
- si_MInst_sisi <"mpyu", int_hexagon_M2_mpyu_up>;
-
-// MTYPE / MPYH / Multiply and use full result.
-def HEXAGON_M2_dpmpyuu_s0:
- di_MInst_sisi <"mpyu", int_hexagon_M2_dpmpyuu_s0>;
-def HEXAGON_M2_dpmpyuu_acc_s0:
- di_MInst_disisi_acc <"mpyu", int_hexagon_M2_dpmpyuu_acc_s0>;
-def HEXAGON_M2_dpmpyuu_nac_s0:
- di_MInst_disisi_nac <"mpyu", int_hexagon_M2_dpmpyuu_nac_s0>;
-def HEXAGON_M2_dpmpyss_s0:
- di_MInst_sisi <"mpy", int_hexagon_M2_dpmpyss_s0>;
-def HEXAGON_M2_dpmpyss_acc_s0:
- di_MInst_disisi_acc <"mpy", int_hexagon_M2_dpmpyss_acc_s0>;
-def HEXAGON_M2_dpmpyss_nac_s0:
- di_MInst_disisi_nac <"mpy", int_hexagon_M2_dpmpyss_nac_s0>;
-
/********************************************************************
* MTYPE/VB *
*********************************************************************/
@@ -3096,15 +3184,9 @@ def HEXAGON_M2_vrmac_s0:
// STYPE / ALU / Absolute value.
def HEXAGON_A2_abs:
si_SInst_si <"abs", int_hexagon_A2_abs>;
-def HEXAGON_A2_absp:
- di_SInst_di <"abs", int_hexagon_A2_absp>;
def HEXAGON_A2_abssat:
si_SInst_si_sat <"abs", int_hexagon_A2_abssat>;
-// STYPE / ALU / Logical Not.
-def HEXAGON_A2_notp:
- di_SInst_di <"not", int_hexagon_A2_notp>;
-
// STYPE / ALU / Sign extend word to doubleword.
def HEXAGON_A2_sxtw:
di_SInst_si <"sxtw", int_hexagon_A2_sxtw>;
Modified: llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td?rev=227335&r1=227334&r2=227335&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td Wed Jan 28 12:06:23 2015
@@ -12,6 +12,20 @@
// 80-V9418-12 Rev. A
// June 15, 2010
+def : T_P_pat <S2_brevp, int_hexagon_S2_brevp>;
+
+def: T_P_pat <S2_ct0p, int_hexagon_S2_ct0p>;
+def: T_P_pat <S2_ct1p, int_hexagon_S2_ct1p>;
+def: T_RR_pat<C4_nbitsset, int_hexagon_C4_nbitsset>;
+def: T_RR_pat<C4_nbitsclr, int_hexagon_C4_nbitsclr>;
+def: T_RI_pat<C4_nbitsclri, int_hexagon_C4_nbitsclri>;
+
+// Extract bitfield
+def : T_PP_pat <S4_extractp_rp, int_hexagon_S4_extractp_rp>;
+def : T_RP_pat <S4_extract_rp, int_hexagon_S4_extract_rp>;
+def : T_PII_pat <S4_extractp, int_hexagon_S4_extractp>;
+def : T_RII_pat <S4_extract, int_hexagon_S4_extract>;
+
// Shift an immediate left by register amount
def : T_IR_pat<S4_lsli, int_hexagon_S4_lsli>;
@@ -25,6 +39,19 @@ def : T_IRI_pat <S4_ori_lsr_ri, int_hex
def : T_IRI_pat <S4_addi_lsr_ri, int_hexagon_S4_addi_lsr_ri>;
def : T_IRI_pat <S4_subi_lsr_ri, int_hexagon_S4_subi_lsr_ri>;
+// Split bitfield
+def : T_RI_pat <A4_bitspliti, int_hexagon_A4_bitspliti>;
+def : T_RR_pat <A4_bitsplit, int_hexagon_A4_bitsplit>;
+
+def: T_RR_pat<S4_parity, int_hexagon_S4_parity>;
+
+def: T_RI_pat<S4_ntstbit_i, int_hexagon_S4_ntstbit_i>;
+def: T_RR_pat<S4_ntstbit_r, int_hexagon_S4_ntstbit_r>;
+
+def: T_RI_pat<S4_clbaddi, int_hexagon_S4_clbaddi>;
+def: T_PI_pat<S4_clbpaddi, int_hexagon_S4_clbpaddi>;
+def: T_P_pat <S4_clbpnorm, int_hexagon_S4_clbpnorm>;
+
//
// ALU 32 types.
//
Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll?rev=227335&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll Wed Jan 28 12:06:23 2015
@@ -0,0 +1,329 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.2 XTYPE/BIT
+
+; Count leading
+declare i32 @llvm.hexagon.S2.clbp(i64)
+define i32 @S2_clbp(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.clbp(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = clb(r1:0)
+
+declare i32 @llvm.hexagon.S2.cl0p(i64)
+define i32 @S2_cl0p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl0p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl0(r1:0)
+
+declare i32 @llvm.hexagon.S2.cl1p(i64)
+define i32 @S2_cl1p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl1p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl1(r1:0)
+
+declare i32 @llvm.hexagon.S4.clbpnorm(i64)
+define i32 @S4_clbpnorm(i64 %a) {
+ %z = call i32 @llvm.hexagon.S4.clbpnorm(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = normamt(r1:0)
+
+declare i32 @llvm.hexagon.S4.clbpaddi(i64, i32)
+define i32 @S4_clbpaddi(i64 %a) {
+ %z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(clb(r1:0), #0)
+
+declare i32 @llvm.hexagon.S4.clbaddi(i32, i32)
+define i32 @S4_clbaddi(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(clb(r0), #0)
+
+declare i32 @llvm.hexagon.S2.cl0(i32)
+define i32 @S2_cl0(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl0(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl0(r0)
+
+declare i32 @llvm.hexagon.S2.cl1(i32)
+define i32 @S2_cl1(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl1(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl1(r0)
+
+declare i32 @llvm.hexagon.S2.clbnorm(i32)
+define i32 @S4_clbnorm(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.clbnorm(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = normamt(r0)
+
+; Count population
+declare i32 @llvm.hexagon.S5.popcountp(i64)
+define i32 @S5_popcountp(i64 %a) {
+ %z = call i32 @llvm.hexagon.S5.popcountp(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = popcount(r1:0)
+
+; Count trailing
+declare i32 @llvm.hexagon.S2.ct0p(i64)
+define i32 @S2_ct0p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct0p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct0(r1:0)
+
+declare i32 @llvm.hexagon.S2.ct1p(i64)
+define i32 @S2_ct1p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct1p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct1(r1:0)
+
+declare i32 @llvm.hexagon.S2.ct0(i32)
+define i32 @S2_ct0(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct0(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct0(r0)
+
+declare i32 @llvm.hexagon.S2.ct1(i32)
+define i32 @S2_ct1(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct1(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct1(r0)
+
+; Extract bitfield
+declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32)
+define i64 @S2_extractup(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = extractu(r1:0, #0, #0)
+
+declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32)
+define i64 @S2_extractp(i64 %a) {
+ %z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = extract(r1:0, #0, #0)
+
+declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32)
+define i32 @S2_extractu(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = extractu(r0, #0, #0)
+
+declare i32 @llvm.hexagon.S4.extract(i32, i32, i32)
+define i32 @S2_extract(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = extract(r0, #0, #0)
+
+declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64)
+define i64 @S2_extractup_rp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = extractu(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64)
+define i64 @S4_extractp_rp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = extract(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64)
+define i32 @S2_extractu_rp(i32 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = extractu(r0, r3:2)
+
+declare i32 @llvm.hexagon.S4.extract.rp(i32, i64)
+define i32 @S4_extract_rp(i32 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = extract(r0, r3:2)
+
+; Insert bitfield
+declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32)
+define i64 @S2_insertp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = insert(r3:2, #0, #0)
+
+declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32)
+define i32 @S2_insert(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = insert(r1, #0, #0)
+
+declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64)
+define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) {
+ %z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c)
+ ret i32 %z
+}
+; CHECK: r0 = insert(r1, r3:2)
+
+declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64)
+define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = insert(r3:2, r5:4)
+
+; Interleave/deinterleave
+declare i64 @llvm.hexagon.S2.deinterleave(i64)
+define i64 @S2_deinterleave(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.deinterleave(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = deinterleave(r1:0)
+
+declare i64 @llvm.hexagon.S2.interleave(i64)
+define i64 @S2_interleave(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.interleave(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = interleave(r1:0)
+
+; Linear feedback-shift operation
+declare i64 @llvm.hexagon.S2.lfsp(i64, i64)
+define i64 @S2_lfsp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = lfs(r1:0, r3:2)
+
+; Masked parity
+declare i32 @llvm.hexagon.S2.parityp(i64, i64)
+define i32 @S2_parityp(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = parity(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.S4.parity(i32, i32)
+define i32 @S4_parity(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = parity(r0, r1)
+
+; Bit reverse
+declare i64 @llvm.hexagon.S2.brevp(i64)
+define i64 @S2_brevp(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.brevp(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = brev(r1:0)
+
+declare i32 @llvm.hexagon.S2.brev(i32)
+define i32 @S2_brev(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.brev(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = brev(r0)
+
+; Set/clear/toggle bit
+declare i32 @llvm.hexagon.S2.setbit.i(i32, i32)
+define i32 @S2_setbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = setbit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32)
+define i32 @S2_clrbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = clrbit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32)
+define i32 @S2_togglebit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = togglebit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.setbit.r(i32, i32)
+define i32 @S2_setbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = setbit(r0, r1)
+
+declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32)
+define i32 @S2_clrbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = clrbit(r0, r1)
+
+declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32)
+define i32 @S2_togglebit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = togglebit(r0, r1)
+
+; Split bitfield
+declare i64 @llvm.hexagon.A4.bitspliti(i32, i32)
+define i64 @A4_bitspliti(i32 %a) {
+ %z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: = bitsplit(r0, #0)
+
+declare i64 @llvm.hexagon.A4.bitsplit(i32, i32)
+define i64 @A4_bitsplit(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = bitsplit(r0, r1)
+
+; Table index
+declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxb(r1, #0, #0)
+
+declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxh(r1, #0, #0)
+
+declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxw(r1, #0, #0)
+
+declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxd(r1, #0, #0)