[llvm-branch-commits] [llvm-branch] r196691 - Merging r196198:

Bill Wendling isanbard at gmail.com
Sat Dec 7 16:05:36 PST 2013


Author: void
Date: Sat Dec  7 18:05:35 2013
New Revision: 196691

URL: http://llvm.org/viewvc/llvm-project?rev=196691&view=rev
Log:
Merging r196198:
------------------------------------------------------------------------
r196198 | haoliu | 2013-12-02 19:39:47 -0800 (Mon, 02 Dec 2013) | 3 lines

AArch64: Add missing scalar pair intrinsics.
E.g. "float32_t vaddv_f32(float32x2_t a)" to be matched into "faddp s0, v1.2s".

------------------------------------------------------------------------
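
For reference, here is a minimal C-level sketch of the intrinsic named in
the log above. It assumes an AArch64 target and the ACLE vaddv_f32
intrinsic from arm_neon.h; the function name sum_pair is hypothetical:

    #include <arm_neon.h>

    /* Reduce the two lanes of a float32x2_t to a scalar sum. With this
       patch the AArch64 backend selects a single scalar faddp for this
       reduction, as described in the log. */
    float sum_pair(float32x2_t v) {
      return vaddv_f32(v);
    }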

Modified:
    llvm/branches/release_34/   (props changed)
    llvm/branches/release_34/lib/Target/AArch64/AArch64InstrNEON.td
    llvm/branches/release_34/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll

Propchange: llvm/branches/release_34/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Dec  7 18:05:35 2013
@@ -1,3 +1,3 @@
 /llvm/branches/Apple/Pertwee:110850,110961
 /llvm/branches/type-system-rewrite:133420-134817
-/llvm/trunk:155241,195092-195094,195100,195102-195103,195118,195129,195136,195138,195148,195152,195156-195157,195161-195162,195193,195272,195317-195318,195327,195330,195333,195339,195343,195355,195364,195379,195397-195399,195401,195408,195421,195423-195424,195432,195439,195444,195455-195456,195469,195476-195477,195479,195491-195493,195514,195528,195547,195567,195573-195576,195590-195591,195599,195632,195635-195636,195670,195677,195679,195682,195684,195713,195716,195769,195773,195779,195782,195787-195788,195791,195803,195812,195827,195834,195843-195844,195878-195881,195887,195903,195905,195912,195915,195932,195936-195943,195972-195973,195975-195976,196004,196044-196046,196069,196100,196104,196129,196151,196153,196189-196192,196261,196267,196269,196294,196369,196391,196508,196532,196538,196611,196638,196658
+/llvm/trunk:155241,195092-195094,195100,195102-195103,195118,195129,195136,195138,195148,195152,195156-195157,195161-195162,195193,195272,195317-195318,195327,195330,195333,195339,195343,195355,195364,195379,195397-195399,195401,195408,195421,195423-195424,195432,195439,195444,195455-195456,195469,195476-195477,195479,195491-195493,195514,195528,195547,195567,195573-195576,195590-195591,195599,195632,195635-195636,195670,195677,195679,195682,195684,195713,195716,195769,195773,195779,195782,195787-195788,195791,195803,195812,195827,195834,195843-195844,195878-195881,195887,195903,195905,195912,195915,195932,195936-195943,195972-195973,195975-195976,196004,196044-196046,196069,196100,196104,196129,196151,196153,196189-196192,196198,196261,196267,196269,196294,196369,196391,196508,196532,196538,196611,196638,196658

Modified: llvm/branches/release_34/lib/Target/AArch64/AArch64InstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_34/lib/Target/AArch64/AArch64InstrNEON.td?rev=196691&r1=196690&r2=196691&view=diff
==============================================================================
--- llvm/branches/release_34/lib/Target/AArch64/AArch64InstrNEON.td (original)
+++ llvm/branches/release_34/lib/Target/AArch64/AArch64InstrNEON.td Sat Dec  7 18:05:35 2013
@@ -5194,6 +5194,8 @@ defm ADDPvv : NeonI_ScalarPair_D_sizes<0
 // Scalar Reduce Addition Pairwise (Integer)
 def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
           (ADDPvv_D_2D VPR128:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_vaddv (v2i64 VPR128:$Rn))),
+          (ADDPvv_D_2D VPR128:$Rn)>;
 
 // Scalar Reduce Addition Pairwise (Floating Point)
 defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
@@ -5237,6 +5239,26 @@ defm : Neon_ScalarPair_SD_size_patterns<
 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm, 
   int_aarch64_neon_vpfminnmq, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
 
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vaddv, 
+    int_aarch64_neon_vaddv, FADDPvv_S_2S, FADDPvv_D_2D>;
+
+def : Pat<(v1f32 (int_aarch64_neon_vaddv (v4f32 VPR128:$Rn))),
+          (FADDPvv_S_2S (v2f32
+               (EXTRACT_SUBREG
+                   (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
+                   sub_64)))>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxv, 
+    int_aarch64_neon_vmaxv, FMAXPvv_S_2S, FMAXPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminv, 
+    int_aarch64_neon_vminv, FMINPvv_S_2S, FMINPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxnmv, 
+    int_aarch64_neon_vmaxnmv, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminnmv, 
+    int_aarch64_neon_vminnmv, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
 
 // Scalar by element Arithmetic
 

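A note on the v1f32/v4f32 pattern added above: a four-lane reduction cannot
be expressed as a single scalar faddp, so it is lowered as a full 4S
pairwise add followed by a scalar pairwise add of the low 64 bits, which is
exactly what the two CHECK lines in test_vaddvq_f32 below expect. A hedged
C sketch of that case, again assuming arm_neon.h (sum_quad is a
hypothetical name):

    #include <arm_neon.h>

    /* Four-lane reduction; per the new pattern this lowers to
       "faddp v.4s, v.4s, v.4s" followed by "faddp s, v.2s". */
    float sum_quad(float32x4_t v) {
      return vaddvq_f32(v);
    }
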
Modified: llvm/branches/release_34/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_34/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll?rev=196691&r1=196690&r2=196691&view=diff
==============================================================================
--- llvm/branches/release_34/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll (original)
+++ llvm/branches/release_34/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll Sat Dec  7 18:05:35 2013
@@ -101,3 +101,147 @@ define <1 x double> @test_fminnmp_v1f64(
         ret <1 x double> %val
 }
 
+define float @test_vaddv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vaddv_f32
+; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define float @test_vaddvq_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vaddvq_f32
+; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vaddvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vaddvq_f64
+; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vmaxv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vmaxv_f32
+; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vmaxvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vmaxvq_f64
+; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vminv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vminv_f32
+; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vminvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vminvq_f64
+; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define double @test_vmaxnmvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vmaxnmvq_f64
+; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vmaxnmv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vmaxnmv_f32
+; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vminnmvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vminnmvq_f64
+; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vminnmv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vminnmv_f32
+; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define <2 x i64> @test_vpaddq_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vpaddq_s64
+; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+  %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @test_vpaddq_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vpaddq_u64
+; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+  %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %1
+}
+
+define i64 @test_vaddvq_s64(<2 x i64> %a) {
+; CHECK-LABEL: test_vaddvq_s64
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+  %2 = extractelement <1 x i64> %1, i32 0
+  ret i64 %2
+}
+
+define i64 @test_vaddvq_u64(<2 x i64> %a) {
+; CHECK-LABEL: test_vaddvq_u64
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+  %2 = extractelement <1 x i64> %1, i32 0
+  ret i64 %2
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64>)
+
+declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double>)
+
+declare <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float>)
\ No newline at end of file
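
One semantic note on the vmaxnmv/vminnmv tests above: FMAXNM/FMINNM follow
IEEE 754-2008 maxNum/minNum, so when exactly one lane is NaN the numeric
lane is returned (fmaxp/fminp, by contrast, propagate the NaN). A minimal
C sketch, assuming arm_neon.h provides vmaxnmv_f32 as in ACLE
(max_pair_numeric is a hypothetical name):

    #include <arm_neon.h>

    /* Pairwise max with maxNum semantics: a single NaN lane yields the
       numeric lane; lowers to "fmaxnmp s, v.2s" per the test above. */
    float max_pair_numeric(float32x2_t v) {
      return vmaxnmv_f32(v);
    }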