[PATCH] [AArch64 NEON] Support poly64 and relevant intrinsic functions (LLVM side)

Kevin Qin kevinqindev at gmail.com
Tue Nov 12 01:49:59 PST 2013


Hi t.p.northover,

This is the LLVM-side half of the poly64 support: the d-register-only vsrid_n/vslid_n intrinsics are removed in favour of the overloaded vsri/vsli forms, scalar selection patterns are added so that Neon_cmp (SETEQ) and Neon_tst nodes on v1i64 select CMEQ and CMTST, and the scalar shift-insert tests are updated to the new intrinsic names.

Please review, thanks.

http://llvm-reviews.chandlerc.com/D2148

Files:
  include/llvm/IR/IntrinsicsAArch64.td
  lib/Target/AArch64/AArch64InstrNEON.td
  test/CodeGen/AArch64/neon-scalar-shift-imm.ll

Index: include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- include/llvm/IR/IntrinsicsAArch64.td
+++ include/llvm/IR/IntrinsicsAArch64.td
@@ -254,12 +254,6 @@
 // Scalar Signed Saturating Shift Left Unsigned (Immediate)
 def int_aarch64_neon_vqshlus_n : Neon_N2V_Intrinsic;
 
-// Shift Right And Insert (Immediate)
-def int_aarch64_neon_vsrid_n : Neon_3Arg_ShiftImm_Intrinsic;
-
-// Shift Left And Insert (Immediate)
-def int_aarch64_neon_vslid_n : Neon_3Arg_ShiftImm_Intrinsic;
-
 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
 def int_aarch64_neon_vcvtf32_n_s32 :
   Intrinsic<[llvm_v1f32_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
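
The scalar shift-right/left-insert patterns now reuse the overloaded vsri/vsli intrinsics instead of the removed d-register-only vsrid_n/vslid_n variants, so at the IR level the calls become type-suffixed. A minimal sketch of the resulting IR, mirroring the test update further down (the <1 x i64> declaration and call shape are taken from that test; the function name sri_d is just illustrative):

; Overloaded intrinsic: the .v1i64 suffix picks the scalar d-register form.
declare <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64>, <1 x i64>, i32)

define i64 @sri_d(i64 %a, i64 %b) {
entry:
  %va = insertelement <1 x i64> undef, i64 %a, i32 0
  %vb = insertelement <1 x i64> undef, i64 %b, i32 0
  ; Shift %vb right by 63 and insert into %va; expected to select SRI d, d, #63.
  %r = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %va, <1 x i64> %vb, i32 63)
  %e = extractelement <1 x i64> %r, i32 0
  ret i64 %e
}
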
Index: lib/Target/AArch64/AArch64InstrNEON.td
===================================================================
--- lib/Target/AArch64/AArch64InstrNEON.td
+++ lib/Target/AArch64/AArch64InstrNEON.td
@@ -4196,11 +4196,11 @@
 
 // Shift Right And Insert (Immediate)
 def SRI : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b01000, "sri">;
-def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsrid_n, SRI>;
+def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsri, SRI>;
 
 // Shift Left And Insert (Immediate)
 def SLI : NeonI_ScalarShiftLeftImm_accum_D_size<0b1, 0b01010, "sli">;
-def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vslid_n, SLI>;
+def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsli, SLI>;
 
 // Signed Saturating Shift Right Narrow (Immediate)
 defm SQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10010, "sqshrn">;
@@ -4450,6 +4450,14 @@
 def CMEQddd: NeonI_Scalar3Same_D_size<0b1, 0b10001, "cmeq">;
 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vceq, CMEQddd>;
 
+class Neon_Scalar3Same_cmp_D_size_v1_patterns<SDPatternOperator opnode,
+                                              Instruction INSTD,
+                                              CondCode CC>
+  : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm), CC)),
+        (INSTD FPR64:$Rn, FPR64:$Rm)>;
+
+def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMEQddd, SETEQ>;
+
 // Scalar Compare Signed Greather Than Or Equal
 def CMGEddd: NeonI_Scalar3Same_D_size<0b0, 0b00111, "cmge">;
 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vcge, CMGEddd>;
@@ -4469,6 +4477,7 @@
 // Scalar Compare Bitwise Test Bits
 def CMTSTddd: NeonI_Scalar3Same_D_size<0b0, 0b10001, "cmtst">;
 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vtstd, CMTSTddd>;
+def : Neon_Scalar3Same_cmp_D_size_patterns<Neon_tst, CMTSTddd>;
 
 // Scalar Compare Bitwise Equal To Zero
 def CMEQddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01001, "cmeq">;
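
The new Neon_Scalar3Same_cmp_D_size_v1_patterns class (and the extra Neon_tst pattern) let the scalar CMEQ/CMTST instructions be selected from the target's generic Neon_cmp/Neon_tst DAG nodes rather than only from the vceq/vtstd intrinsics, which is what v1i64 (poly64) compares need. A hedged IR sketch of the kind of input this is aimed at, assuming the backend lowers a sign-extended v1i64 icmp to a Neon_cmp node with SETEQ (the function name and that lowering path are my assumption, not part of this patch):

define i64 @poly64_eq(i64 %a, i64 %b) {
entry:
  %va = insertelement <1 x i64> undef, i64 %a, i32 0
  %vb = insertelement <1 x i64> undef, i64 %b, i32 0
  ; All-ones on equality, zero otherwise (vceq_p64-style semantics).
  ; Sketch only: assumes this compare reaches selection as a Neon_cmp/SETEQ
  ; node and is matched by the new pattern to CMEQ d, d, d.
  %cmp = icmp eq <1 x i64> %va, %vb
  %res = sext <1 x i1> %cmp to <1 x i64>
  %e = extractelement <1 x i64> %res, i32 0
  ret i64 %e
}
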
Index: test/CodeGen/AArch64/neon-scalar-shift-imm.ll
===================================================================
--- test/CodeGen/AArch64/neon-scalar-shift-imm.ll
+++ test/CodeGen/AArch64/neon-scalar-shift-imm.ll
@@ -272,12 +272,12 @@
 entry:
   %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
   %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
-  %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
+  %vsri2 = tail call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
   %0 = extractelement <1 x i64> %vsri2, i32 0
   ret i64 %0
 }
 
-declare <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64>, <1 x i64>, i32)
+declare <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64>, <1 x i64>, i32)
 
 define i64 @test_vsrid_n_u64(i64 %a, i64 %b) {
 ; CHECK: test_vsrid_n_u64
@@ -285,7 +285,7 @@
 entry:
   %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
   %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
-  %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
+  %vsri2 = tail call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
   %0 = extractelement <1 x i64> %vsri2, i32 0
   ret i64 %0
 }
@@ -296,12 +296,12 @@
 entry:
   %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
   %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
-  %vsli2 = call <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
+  %vsli2 = tail call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
   %0 = extractelement <1 x i64> %vsli2, i32 0
   ret i64 %0
 }
 
-declare <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64>, <1 x i64>, i32)
+declare <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64>, <1 x i64>, i32)
 
 define i64 @test_vslid_n_u64(i64 %a, i64 %b) {
 ; CHECK: test_vslid_n_u64
@@ -309,7 +309,7 @@
 entry:
   %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
   %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
-  %vsli2 = call <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
+  %vsli2 = tail call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
   %0 = extractelement <1 x i64> %vsli2, i32 0
   ret i64 %0
 }
-------------- next part --------------
A non-text attachment was scrubbed...
Name: D2148.1.patch
Type: text/x-patch
Size: 5042 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20131112/85df006f/attachment.bin>

