[llvm] r227995 - [Hexagon] Converting complex number intrinsics and adding tests.

Colin LeMahieu colinl at codeaurora.org
Tue Feb 3 10:16:28 PST 2015


Author: colinl
Date: Tue Feb  3 12:16:28 2015
New Revision: 227995

URL: http://llvm.org/viewvc/llvm-project?rev=227995&view=rev
Log:
[Hexagon] Converting complex number intrinsics and adding tests.

Added:
    llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
Modified:
    llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td
    llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td

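This change replaces the hand-rolled HEXAGON_M2_* wrapper defs (deleted further down) with the backend's T_*_pat intrinsic-selection classes, where the suffix encodes the operand signature (R = 32-bit register, P = 64-bit register pair), so T_PP_pat selects a two-register-pair intrinsic and T_PPP_pat its accumulating form. The class definitions themselves are not part of this diff; the following is only a minimal sketch of what they are assumed to look like (names and operand types inferred, not taken from the patch):

// Hypothetical sketch only; the real class definitions live elsewhere in
// HexagonIntrinsics.td and may differ in detail.
class T_PP_pat <InstHexagon MI, Intrinsic IntID>
  : Pat <(IntID DoubleRegs:$Rss, DoubleRegs:$Rtt),   // match the intrinsic call
         (MI DoubleRegs:$Rss, DoubleRegs:$Rtt)>;     // emit the instruction

class T_PPP_pat <InstHexagon MI, Intrinsic IntID>    // accumulating (Rxx += ...) form
  : Pat <(IntID DoubleRegs:$Rxx, DoubleRegs:$Rss, DoubleRegs:$Rtt),
         (MI DoubleRegs:$Rxx, DoubleRegs:$Rss, DoubleRegs:$Rtt)>;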
Modified: llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td?rev=227995&r1=227994&r2=227995&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonIntrinsics.td Tue Feb  3 12:16:28 2015
@@ -337,6 +337,35 @@ def : T_PRR_pat <M2_mpyud_nac_hl_s1, int
 def : T_PRR_pat <M2_mpyud_nac_lh_s1, int_hexagon_M2_mpyud_nac_lh_s1>;
 def : T_PRR_pat <M2_mpyud_nac_ll_s1, int_hexagon_M2_mpyud_nac_ll_s1>;
 
+// Vector complex multiply imaginary: Rdd=vcmpyi(Rss,Rtt)[:<<1]:sat
+def : T_PP_pat <M2_vcmpy_s1_sat_i, int_hexagon_M2_vcmpy_s1_sat_i>;
+def : T_PP_pat <M2_vcmpy_s0_sat_i, int_hexagon_M2_vcmpy_s0_sat_i>;
+
+// Vector complex multiply real: Rdd=vcmpyr(Rss,Rtt)[:<<1]:sat
+def : T_PP_pat <M2_vcmpy_s1_sat_r, int_hexagon_M2_vcmpy_s1_sat_r>;
+def : T_PP_pat <M2_vcmpy_s0_sat_r, int_hexagon_M2_vcmpy_s0_sat_r>;
+
+// Vector reduce complex multiply real or imaginary:
+// Rdd[+]=vrcmpy[ir](Rss,Rtt[*])
+def : T_PP_pat  <M2_vrcmpyi_s0,  int_hexagon_M2_vrcmpyi_s0>;
+def : T_PP_pat  <M2_vrcmpyi_s0c, int_hexagon_M2_vrcmpyi_s0c>;
+def : T_PPP_pat <M2_vrcmaci_s0,  int_hexagon_M2_vrcmaci_s0>;
+def : T_PPP_pat <M2_vrcmaci_s0c, int_hexagon_M2_vrcmaci_s0c>;
+
+def : T_PP_pat  <M2_vrcmpyr_s0,  int_hexagon_M2_vrcmpyr_s0>;
+def : T_PP_pat  <M2_vrcmpyr_s0c, int_hexagon_M2_vrcmpyr_s0c>;
+def : T_PPP_pat <M2_vrcmacr_s0,  int_hexagon_M2_vrcmacr_s0>;
+def : T_PPP_pat <M2_vrcmacr_s0c, int_hexagon_M2_vrcmacr_s0c>;
+
+// Vector reduce halfwords
+// Rdd[+]=vrmpyh(Rss,Rtt)
+def : T_PP_pat  <M2_vrmpy_s0, int_hexagon_M2_vrmpy_s0>;
+def : T_PPP_pat <M2_vrmac_s0, int_hexagon_M2_vrmac_s0>;
+
+// Vector complex multiply real or imaginary with accumulation
+// Rxx+=vcmpy[ir](Rss,Rtt):sat
+def : T_PPP_pat <M2_vcmac_s0_sat_r, int_hexagon_M2_vcmac_s0_sat_r>;
+def : T_PPP_pat <M2_vcmac_s0_sat_i, int_hexagon_M2_vcmac_s0_sat_i>;
 
 //===----------------------------------------------------------------------===//
 // Add/Subtract halfword
@@ -745,12 +774,38 @@ def : Pat<(int_hexagon_C2_vmux PredRegs:
 def : T_RR_pat <M2_dpmpyss_s0, int_hexagon_M2_dpmpyss_s0>;
 def : T_RR_pat <M2_dpmpyuu_s0, int_hexagon_M2_dpmpyuu_s0>;
 
+// Complex multiply real or imaginary
+def : T_RR_pat <M2_cmpyi_s0,   int_hexagon_M2_cmpyi_s0>;
+def : T_RR_pat <M2_cmpyr_s0,   int_hexagon_M2_cmpyr_s0>;
+
+// Complex multiply
+def : T_RR_pat <M2_cmpys_s0,   int_hexagon_M2_cmpys_s0>;
+def : T_RR_pat <M2_cmpysc_s0,  int_hexagon_M2_cmpysc_s0>;
+def : T_RR_pat <M2_cmpys_s1,   int_hexagon_M2_cmpys_s1>;
+def : T_RR_pat <M2_cmpysc_s1,  int_hexagon_M2_cmpysc_s1>;
+
 // Rxx[+-]= mpy[u](Rs,Rt)
 def : T_PRR_pat <M2_dpmpyss_acc_s0, int_hexagon_M2_dpmpyss_acc_s0>;
 def : T_PRR_pat <M2_dpmpyss_nac_s0, int_hexagon_M2_dpmpyss_nac_s0>;
 def : T_PRR_pat <M2_dpmpyuu_acc_s0, int_hexagon_M2_dpmpyuu_acc_s0>;
 def : T_PRR_pat <M2_dpmpyuu_nac_s0, int_hexagon_M2_dpmpyuu_nac_s0>;
 
+// Rxx[-+]=cmpy(Rs,Rt)[:<<1]:sat
+def : T_PRR_pat <M2_cmacs_s0, int_hexagon_M2_cmacs_s0>;
+def : T_PRR_pat <M2_cnacs_s0, int_hexagon_M2_cnacs_s0>;
+def : T_PRR_pat <M2_cmacs_s1, int_hexagon_M2_cmacs_s1>;
+def : T_PRR_pat <M2_cnacs_s1, int_hexagon_M2_cnacs_s1>;
+
+// Rxx[-+]=cmpy(Rs,Rt*)[:<<1]:sat
+def : T_PRR_pat <M2_cmacsc_s0, int_hexagon_M2_cmacsc_s0>;
+def : T_PRR_pat <M2_cnacsc_s0, int_hexagon_M2_cnacsc_s0>;
+def : T_PRR_pat <M2_cmacsc_s1, int_hexagon_M2_cmacsc_s1>;
+def : T_PRR_pat <M2_cnacsc_s1, int_hexagon_M2_cnacsc_s1>;
+
+// Rxx+=cmpy[ir](Rs,Rt)
+def : T_PRR_pat <M2_cmaci_s0, int_hexagon_M2_cmaci_s0>;
+def : T_PRR_pat <M2_cmacr_s0, int_hexagon_M2_cmacr_s0>;
+
 /********************************************************************
 *            CR                                                     *
 *********************************************************************/
@@ -807,6 +862,13 @@ def : MType_R32_pat <int_hexagon_M2_hmmp
 def : MType_R32_pat <int_hexagon_M2_hmmpyl_rs1, M2_hmmpyl_rs1>;
 def : MType_R32_pat <int_hexagon_M2_dpmpyss_rnd_s0, M2_dpmpyss_rnd_s0>;
 
+// Complex multiply with round and pack
+// Rd32=cmpy(Rs32,[*]Rt32)[:<<1]:rnd:sat
+def : MType_R32_pat <int_hexagon_M2_cmpyrs_s0, M2_cmpyrs_s0>;
+def : MType_R32_pat <int_hexagon_M2_cmpyrs_s1, M2_cmpyrs_s1>;
+def : MType_R32_pat <int_hexagon_M2_cmpyrsc_s0, M2_cmpyrsc_s0>;
+def : MType_R32_pat <int_hexagon_M2_cmpyrsc_s1, M2_cmpyrsc_s1>;
+
 /********************************************************************
 *            STYPE/ALU                                              *
 *********************************************************************/
@@ -1503,101 +1565,6 @@ def HEXAGON_M2_vabsdiffh:
 def HEXAGON_M2_vabsdiffw:
   di_MInst_didi                   <"vabsdiffw",int_hexagon_M2_vabsdiffw>;
 
-
-/********************************************************************
-*            MTYPE/COMPLEX                                          *
-*********************************************************************/
-
-// MTYPE / COMPLEX / Complex multiply.
-// Rdd[-+]=cmpy(Rs, Rt:<<1]:sat
-def HEXAGON_M2_cmpys_s1:
-  di_MInst_sisi_s1_sat            <"cmpy",     int_hexagon_M2_cmpys_s1>;
-def HEXAGON_M2_cmpys_s0:
-  di_MInst_sisi_sat               <"cmpy",     int_hexagon_M2_cmpys_s0>;
-def HEXAGON_M2_cmpysc_s1:
-  di_MInst_sisi_s1_sat_conj       <"cmpy",     int_hexagon_M2_cmpysc_s1>;
-def HEXAGON_M2_cmpysc_s0:
-  di_MInst_sisi_sat_conj          <"cmpy",     int_hexagon_M2_cmpysc_s0>;
-
-def HEXAGON_M2_cmacs_s1:
-  di_MInst_disisi_acc_s1_sat      <"cmpy",     int_hexagon_M2_cmacs_s1>;
-def HEXAGON_M2_cmacs_s0:
-  di_MInst_disisi_acc_sat         <"cmpy",     int_hexagon_M2_cmacs_s0>;
-def HEXAGON_M2_cmacsc_s1:
-  di_MInst_disisi_acc_s1_sat_conj <"cmpy",     int_hexagon_M2_cmacsc_s1>;
-def HEXAGON_M2_cmacsc_s0:
-  di_MInst_disisi_acc_sat_conj    <"cmpy",     int_hexagon_M2_cmacsc_s0>;
-
-def HEXAGON_M2_cnacs_s1:
-  di_MInst_disisi_nac_s1_sat      <"cmpy",     int_hexagon_M2_cnacs_s1>;
-def HEXAGON_M2_cnacs_s0:
-  di_MInst_disisi_nac_sat         <"cmpy",     int_hexagon_M2_cnacs_s0>;
-def HEXAGON_M2_cnacsc_s1:
-  di_MInst_disisi_nac_s1_sat_conj <"cmpy",     int_hexagon_M2_cnacsc_s1>;
-def HEXAGON_M2_cnacsc_s0:
-  di_MInst_disisi_nac_sat_conj    <"cmpy",     int_hexagon_M2_cnacsc_s0>;
-
-// MTYPE / COMPLEX / Complex multiply real or imaginary.
-def HEXAGON_M2_cmpyr_s0:
-  di_MInst_sisi                   <"cmpyr",    int_hexagon_M2_cmpyr_s0>;
-def HEXAGON_M2_cmacr_s0:
-  di_MInst_disisi_acc             <"cmpyr",    int_hexagon_M2_cmacr_s0>;
-
-def HEXAGON_M2_cmpyi_s0:
-  di_MInst_sisi                   <"cmpyi",    int_hexagon_M2_cmpyi_s0>;
-def HEXAGON_M2_cmaci_s0:
-  di_MInst_disisi_acc             <"cmpyi",    int_hexagon_M2_cmaci_s0>;
-
-// MTYPE / COMPLEX / Complex multiply with round and pack.
-// Rxx32+=cmpy(Rs32,[*]Rt32:<<1]:rnd:sat
-def HEXAGON_M2_cmpyrs_s0:
-  si_MInst_sisi_rnd_sat           <"cmpy",     int_hexagon_M2_cmpyrs_s0>;
-def HEXAGON_M2_cmpyrs_s1:
-  si_MInst_sisi_s1_rnd_sat        <"cmpy",     int_hexagon_M2_cmpyrs_s1>;
-
-def HEXAGON_M2_cmpyrsc_s0:
-  si_MInst_sisi_rnd_sat_conj      <"cmpy",     int_hexagon_M2_cmpyrsc_s0>;
-def HEXAGON_M2_cmpyrsc_s1:
-  si_MInst_sisi_s1_rnd_sat_conj   <"cmpy",     int_hexagon_M2_cmpyrsc_s1>;
-
-//MTYPE / COMPLEX / Vector complex multiply real or imaginary.
-def HEXAGON_M2_vcmpy_s0_sat_i:
-  di_MInst_didi_sat               <"vcmpyi",   int_hexagon_M2_vcmpy_s0_sat_i>;
-def HEXAGON_M2_vcmpy_s1_sat_i:
-  di_MInst_didi_s1_sat            <"vcmpyi",   int_hexagon_M2_vcmpy_s1_sat_i>;
-
-def HEXAGON_M2_vcmpy_s0_sat_r:
-  di_MInst_didi_sat               <"vcmpyr",   int_hexagon_M2_vcmpy_s0_sat_r>;
-def HEXAGON_M2_vcmpy_s1_sat_r:
-  di_MInst_didi_s1_sat            <"vcmpyr",   int_hexagon_M2_vcmpy_s1_sat_r>;
-
-def HEXAGON_M2_vcmac_s0_sat_i:
-  di_MInst_dididi_acc_sat         <"vcmpyi",   int_hexagon_M2_vcmac_s0_sat_i>;
-def HEXAGON_M2_vcmac_s0_sat_r:
-  di_MInst_dididi_acc_sat         <"vcmpyr",   int_hexagon_M2_vcmac_s0_sat_r>;
-
-//MTYPE / COMPLEX / Vector reduce complex multiply real or imaginary.
-def HEXAGON_M2_vrcmpyi_s0:
-  di_MInst_didi                   <"vrcmpyi",  int_hexagon_M2_vrcmpyi_s0>;
-def HEXAGON_M2_vrcmpyr_s0:
-  di_MInst_didi                   <"vrcmpyr",  int_hexagon_M2_vrcmpyr_s0>;
-
-def HEXAGON_M2_vrcmpyi_s0c:
-  di_MInst_didi_conj              <"vrcmpyi",  int_hexagon_M2_vrcmpyi_s0c>;
-def HEXAGON_M2_vrcmpyr_s0c:
-  di_MInst_didi_conj              <"vrcmpyr",  int_hexagon_M2_vrcmpyr_s0c>;
-
-def HEXAGON_M2_vrcmaci_s0:
-  di_MInst_dididi_acc             <"vrcmpyi",  int_hexagon_M2_vrcmaci_s0>;
-def HEXAGON_M2_vrcmacr_s0:
-  di_MInst_dididi_acc             <"vrcmpyr",  int_hexagon_M2_vrcmacr_s0>;
-
-def HEXAGON_M2_vrcmaci_s0c:
-  di_MInst_dididi_acc_conj        <"vrcmpyi",  int_hexagon_M2_vrcmaci_s0c>;
-def HEXAGON_M2_vrcmacr_s0c:
-  di_MInst_dididi_acc_conj        <"vrcmpyr",  int_hexagon_M2_vrcmacr_s0c>;
-
-
 /********************************************************************
 *            MTYPE/MPYH                                             *
 *********************************************************************/

Modified: llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td?rev=227995&r1=227994&r2=227995&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonIntrinsicsV4.td Tue Feb  3 12:16:28 2015
@@ -77,9 +77,25 @@ def : T_RRI_pat <M4_mpyri_addr, int_hexa
 def : T_RRR_pat <M4_mac_up_s1_sat, int_hexagon_M4_mac_up_s1_sat>;
 def : T_RRR_pat <M4_nac_up_s1_sat, int_hexagon_M4_nac_up_s1_sat>;
 
+// Complex multiply 32x16
+def : T_PR_pat <M4_cmpyi_wh, int_hexagon_M4_cmpyi_wh>;
+def : T_PR_pat <M4_cmpyr_wh, int_hexagon_M4_cmpyr_wh>;
+
+def : T_PR_pat <M4_cmpyi_whc, int_hexagon_M4_cmpyi_whc>;
+def : T_PR_pat <M4_cmpyr_whc, int_hexagon_M4_cmpyr_whc>;
+
 def : T_PP_pat<A4_andnp, int_hexagon_A4_andnp>;
 def : T_PP_pat<A4_ornp,  int_hexagon_A4_ornp>;
 
+// Complex add/sub halfwords/words
+def : T_PP_pat <S4_vxaddsubw, int_hexagon_S4_vxaddsubw>;
+def : T_PP_pat <S4_vxsubaddw, int_hexagon_S4_vxsubaddw>;
+def : T_PP_pat <S4_vxaddsubh, int_hexagon_S4_vxaddsubh>;
+def : T_PP_pat <S4_vxsubaddh, int_hexagon_S4_vxsubaddh>;
+
+def : T_PP_pat <S4_vxaddsubhr, int_hexagon_S4_vxaddsubhr>;
+def : T_PP_pat <S4_vxsubaddhr, int_hexagon_S4_vxsubaddhr>;
+
 // Extract bitfield
 def : T_PP_pat  <S4_extractp_rp, int_hexagon_S4_extractp_rp>;
 def : T_RP_pat  <S4_extract_rp, int_hexagon_S4_extract_rp>;
@@ -109,6 +125,18 @@ def : T_PPR_pat <A4_vrminuh, int_hexagon
 def : T_PPR_pat <A4_vrminw, int_hexagon_A4_vrminw>;
 def : T_PPR_pat <A4_vrminuw, int_hexagon_A4_vrminuw>;
 
+// Rotate and reduce bytes
+def : Pat <(int_hexagon_S4_vrcrotate DoubleRegs:$src1, IntRegs:$src2,
+                                     u2ImmPred:$src3),
+           (S4_vrcrotate DoubleRegs:$src1, IntRegs:$src2, u2ImmPred:$src3)>;
+
+// Rotate and reduce bytes with accumulation
+// Rxx+=vrcrotate(Rss,Rt,#u2)
+def : Pat <(int_hexagon_S4_vrcrotate_acc DoubleRegs:$src1, DoubleRegs:$src2,
+                                         IntRegs:$src3, u2ImmPred:$src4),
+           (S4_vrcrotate_acc DoubleRegs:$src1, DoubleRegs:$src2,
+                             IntRegs:$src3, u2ImmPred:$src4)>;
+
 // Vector conditional negate
 def : T_PPR_pat<S2_vrcnegh, int_hexagon_S2_vrcnegh>;
 

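The two vrcrotate patterns above are written as raw Pat<> defs because their last operand is a 2-bit immediate (u2ImmPred) rather than a register; apparently none of the T_*_pat classes used in this patch covers that (pair, register, immediate) shape. A dedicated class could remove the repetition; the following is a hypothetical sketch only (neither the class name nor its use appears in the patch):

// Hypothetical helper: register pair, 32-bit register, u2 immediate.
class T_PRI_pat <InstHexagon MI, Intrinsic IntID>
  : Pat <(IntID DoubleRegs:$Rss, IntRegs:$Rt, u2ImmPred:$Ii),
         (MI DoubleRegs:$Rss, IntRegs:$Rt, u2ImmPred:$Ii)>;

// With such a class, the non-accumulating pattern above would collapse to:
// def : T_PRI_pat <S4_vrcrotate, int_hexagon_S4_vrcrotate>;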
Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll?rev=227995&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll Tue Feb  3 12:16:28 2015
@@ -0,0 +1,349 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.3 XTYPE/COMPLEX
+
+; Complex add/sub halfwords
+declare i64 @llvm.hexagon.S4.vxaddsubh(i64, i64)
+define i64 @S4_vxaddsubh(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64)
+define i64 @S4_vxsubaddh(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64)
+define i64 @S4_vxaddsubhr(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):rnd:>>1:sat
+
+declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64)
+define i64 @S4_vxsubaddhr(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):rnd:>>1:sat
+
+; Complex add/sub words
+declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64)
+define i64 @S4_vxaddsubw(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vxaddsubw(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64)
+define i64 @S4_vxsubaddw(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vxsubaddw(r1:0, r3:2):sat
+
+; Complex multiply
+declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32)
+define i64 @M2_cmpys_s0(i32 %a, i32 %b) {
+  %z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1):sat
+
+declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32)
+define i64 @M2_cmpys_s1(i32 %a, i32 %b) {
+  %z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32)
+define i64 @M2_cmpysc_s0(i32 %a, i32 %b) {
+  %z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1*):sat
+
+declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32)
+define i64 @M2_cmpysc_s1(i32 %a, i32 %b) {
+  %z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1*):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32)
+define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32)
+define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32)
+define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32)
+define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32)
+define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3*):sat
+
+declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32)
+define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3*):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32)
+define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3*):sat
+
+declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32)
+define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3*):<<1:sat
+
+; Complex multiply real or imaginary
+declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32)
+define i64 @M2_cmpyi_s0(i32 %a, i32 %b) {
+  %z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = cmpyi(r0, r1)
+
+declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32)
+define i64 @M2_cmpyr_s0(i32 %a, i32 %b) {
+  %z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = cmpyr(r0, r1)
+
+declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32)
+define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += cmpyi(r2, r3)
+
+declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32)
+define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += cmpyr(r2, r3)
+
+; Complex multiply with round and pack
+declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32)
+define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b)
+  ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1):rnd:sat
+
+declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32)
+define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b)
+  ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32)
+define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b)
+  ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1*):rnd:sat
+
+declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32)
+define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b)
+  ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1*):<<1:rnd:sat
+
+; Complex multiply 32x16
+declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32)
+define i32 @M4_cmpyi_wh(i64 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b)
+  ret i32 %z
+}
+; CHECK: r0 = cmpyiwh(r1:0, r2):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32)
+define i32 @M4_cmpyi_whc(i64 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b)
+  ret i32 %z
+}
+; CHECK: r0 = cmpyiwh(r1:0, r2*):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32)
+define i32 @M4_cmpyr_wh(i64 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b)
+  ret i32 %z
+}
+; CHECK: r0 = cmpyrwh(r1:0, r2):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32)
+define i32 @M4_cmpyr_whc(i64 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b)
+  ret i32 %z
+}
+; CHECK: r0 = cmpyrwh(r1:0, r2*):<<1:rnd:sat
+
+; Vector complex multiply real or imaginary
+declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64)
+define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vcmpyr(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64)
+define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vcmpyr(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64)
+define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vcmpyi(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64)
+define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vcmpyi(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64)
+define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) {
+  %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += vcmpyr(r3:2, r5:4):sat
+
+declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64)
+define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) {
+  %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += vcmpyi(r3:2, r5:4):sat
+
+; Vector complex conjugate
+declare i64 @llvm.hexagon.A2.vconj(i64)
+define i64 @A2_vconj(i64 %a) {
+  %z = call i64 @llvm.hexagon.A2.vconj(i64 %a)
+  ret i64 %z
+}
+; CHECK: r1:0 = vconj(r1:0):sat
+
+; Vector complex rotate
+declare i64 @llvm.hexagon.S2.vcrotate(i64, i32)
+define i64 @S2_vcrotate(i64 %a, i32 %b) {
+  %z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vcrotate(r1:0, r2)
+
+; Vector reduce complex multiply real or imaginary
+declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64)
+define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyi(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64)
+define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyr(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64)
+define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyi(r1:0, r3:2*)
+
+declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64)
+define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b)
+  ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyr(r1:0, r3:2*)
+
+declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64)
+define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) {
+  %z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyi(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64)
+define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) {
+  %z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyr(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64)
+define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) {
+  %z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyi(r3:2, r5:4*)
+
+declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64)
+define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) {
+  %z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c)
+  ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyr(r3:2, r5:4*)
+
+; Vector reduce complex rotate
+declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32)
+define i64 @S4_vrcrotate(i64 %a, i32 %b) {
+  %z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 = vrcrotate(r1:0, r2, #0)
+
+declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32)
+define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) {
+  %z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 += vrcrotate(r3:2, r4, #0)
