[llvm] r196832 - [AArch64] Refactor NEON scalar reduce pairwise front-end codegen to remove

Chad Rosier mcrosier at codeaurora.org
Mon Dec 9 14:47:34 PST 2013


Author: mcrosier
Date: Mon Dec  9 16:47:34 2013
New Revision: 196832

URL: http://llvm.org/viewvc/llvm-project?rev=196832&view=rev
Log:
[AArch64] Refactor NEON scalar reduce pairwise front-end codegen to remove
unnecessary patterns in tablegen.
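
With the front end now emitting the existing pairwise intrinsics (vpfadd, vpmax, vpmin, vpfmaxnm, vpfminnm) for the floating-point vaddv/vmaxv/vminv/vmaxnmv/vminnmv reductions, the duplicate Neon_ScalarPair_SD_size_patterns instantiations in AArch64InstrNEON.td can be dropped. Only the v4f32 add keeps a dedicated pattern, since it lowers in two steps: a vector FADDP on the full .4s register followed by the scalar FADDP on the low .2s half. As a rough sketch (the function name is illustrative; it mirrors test_vaddv_f32 in the updated test below), the IR now expected for a two-lane float reduction is:

  define float @vaddv_f32_sketch(<2 x float> %a) {
    ; pairwise add of the two lanes, yielding a one-element vector
    %sum = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
    ; extract the scalar result; selects to a single "faddp s, v.2s"
    %res = extractelement <1 x float> %sum, i32 0
    ret float %res
  }
  declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float>)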

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td
    llvm/trunk/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td?rev=196832&r1=196831&r2=196832&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td Mon Dec  9 16:47:34 2013
@@ -5333,27 +5333,12 @@ defm : Neon_ScalarPair_SD_size_patterns<
 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
                                         FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
 
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vaddv,
-                                        FADDPvv_S_2S, FADDPvv_D_2D>;
-
-def : Pat<(v1f32 (int_aarch64_neon_vaddv (v4f32 VPR128:$Rn))),
+def : Pat<(v1f32 (int_aarch64_neon_vpfadd (v4f32 VPR128:$Rn))),
           (FADDPvv_S_2S (v2f32
                (EXTRACT_SUBREG
                    (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
                    sub_64)))>;
 
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxv,
-                                        FMAXPvv_S_2S, FMAXPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminv,
-                                        FMINPvv_S_2S, FMINPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxnmv,
-                                        FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminnmv,
-                                        FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
-
 // Scalar by element Arithmetic
 
 class NeonI_ScalarXIndexedElemArith<string asmop, bits<4> opcode,

Modified: llvm/trunk/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll?rev=196832&r1=196831&r2=196832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll Mon Dec  9 16:47:34 2013
@@ -103,7 +103,7 @@ define <1 x double> @test_fminnmp_v1f64(
 define float @test_vaddv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vaddv_f32
 ; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -112,7 +112,7 @@ define float @test_vaddvq_f32(<4 x float
 ; CHECK-LABEL: test_vaddvq_f32
 ; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
 ; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -120,7 +120,7 @@ define float @test_vaddvq_f32(<4 x float
 define double @test_vaddvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vaddvq_f64
 ; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -128,7 +128,7 @@ define double @test_vaddvq_f64(<2 x doub
 define float @test_vmaxv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vmaxv_f32
 ; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -136,7 +136,7 @@ define float @test_vmaxv_f32(<2 x float>
 define double @test_vmaxvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vmaxvq_f64
 ; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -144,7 +144,7 @@ define double @test_vmaxvq_f64(<2 x doub
 define float @test_vminv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vminv_f32
 ; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -152,7 +152,7 @@ define float @test_vminv_f32(<2 x float>
 define double @test_vminvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vminvq_f64
 ; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -160,7 +160,7 @@ define double @test_vminvq_f64(<2 x doub
 define double @test_vmaxnmvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vmaxnmvq_f64
 ; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -168,7 +168,7 @@ define double @test_vmaxnmvq_f64(<2 x do
 define float @test_vmaxnmv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vmaxnmv_f32
 ; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -176,7 +176,7 @@ define float @test_vmaxnmv_f32(<2 x floa
 define double @test_vminnmvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vminnmvq_f64
 ; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -184,7 +184,7 @@ define double @test_vminnmvq_f64(<2 x do
 define float @test_vminnmv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vminnmv_f32
 ; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -223,24 +223,4 @@ declare <1 x i64> @llvm.aarch64.neon.vad
 
 declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
 
-declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double>)
-
-declare <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float>)
-
-declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float>)
\ No newline at end of file
+declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float>)