[llvm] r336754 - [Power9] Add remaining __float128 builtin support for FMA round to odd

Stefan Pintilie via llvm-commits llvm-commits@lists.llvm.org
Tue Jul 10 18:42:22 PDT 2018


Author: stefanp
Date: Tue Jul 10 18:42:22 2018
New Revision: 336754

URL: http://llvm.org/viewvc/llvm-project?rev=336754&view=rev
Log:
[Power9] Add remaining __float128 builtin support for FMA round to odd

Implement this as it is done in GCC:

__float128 a, b, c, d;
a = __builtin_fmaf128_round_to_odd (b, c, d);         // generates xsmaddqpo
a = __builtin_fmaf128_round_to_odd (b, c, -d);        // generates xsmsubqpo
a = - __builtin_fmaf128_round_to_odd (b, c, d);       // generates xsnmaddqpo
a = - __builtin_fmaf128_round_to_odd (b, c, -d);      // generates xsnmsubqpo

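At the IR level each form is matched from @llvm.ppc.fmaf128.round.to.odd, with the
negations expressed as subtractions from -0.0, as in the updated test below. A minimal
sketch of the xsmsubqpo case (value names here are hypothetical, not from the patch):

  ; hypothetical IR, mirroring the fsub-based negation used in the test
  %neg = fsub fp128 0xL00000000000000008000000000000000, %d
  %res = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %b, fp128 %c, fp128 %neg)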
Differential Revision: https://reviews.llvm.org/D48218

Modified:
    llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td
    llvm/trunk/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll

Modified: llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td?rev=336754&r1=336753&r2=336754&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td Tue Jul 10 18:42:22 2018
@@ -2498,17 +2498,26 @@ let AddedComplexity = 400, Predicates =
                                        [(set f128:$vT,
                                              (fma f128:$vA, f128:$vB,
                                                   (fneg f128:$vTi)))]>;
-  def XSMSUBQPO : X_VT5_VA5_VB5_FMA_Ro<63, 420, "xsmsubqpo" , []>;
+  def XSMSUBQPO : X_VT5_VA5_VB5_FMA_Ro<63, 420, "xsmsubqpo" ,
+                                      [(set f128:$vT,
+                                      (int_ppc_fmaf128_round_to_odd
+                                      f128:$vA, f128:$vB, (fneg f128:$vTi)))]>;
   def XSNMADDQP : X_VT5_VA5_VB5_FMA <63, 452, "xsnmaddqp",
                                      [(set f128:$vT,
                                            (fneg (fma f128:$vA, f128:$vB,
                                                       f128:$vTi)))]>;
-  def XSNMADDQPO: X_VT5_VA5_VB5_FMA_Ro<63, 452, "xsnmaddqpo", []>;
+  def XSNMADDQPO: X_VT5_VA5_VB5_FMA_Ro<63, 452, "xsnmaddqpo",
+                                      [(set f128:$vT,
+                                      (fneg (int_ppc_fmaf128_round_to_odd
+                                      f128:$vA, f128:$vB, f128:$vTi)))]>;
   def XSNMSUBQP : X_VT5_VA5_VB5_FMA <63, 484, "xsnmsubqp",
                                      [(set f128:$vT,
                                            (fneg (fma f128:$vA, f128:$vB,
                                                       (fneg f128:$vTi))))]>;
-  def XSNMSUBQPO: X_VT5_VA5_VB5_FMA_Ro<63, 484, "xsnmsubqpo", []>;
+  def XSNMSUBQPO: X_VT5_VA5_VB5_FMA_Ro<63, 484, "xsnmsubqpo",
+                                      [(set f128:$vT,
+                                      (fneg (int_ppc_fmaf128_round_to_odd
+                                      f128:$vA, f128:$vB, (fneg f128:$vTi))))]>;
 
   // Additional fnmsub patterns: -a*c + b == -(a*c - b)
   def : Pat<(fma (fneg f128:$A), f128:$C, f128:$B), (XSNMSUBQP $B, $C, $A)>;

Modified: llvm/trunk/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll?rev=336754&r1=336753&r2=336754&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll Tue Jul 10 18:42:22 2018
@@ -1,92 +1,103 @@
 ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -enable-ppc-quad-precision \
-; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
+; RUN:   -mtriple=powerpc64le-unknown-unknown -ppc-vsr-nums-as-vr \
+; RUN:   -ppc-asm-full-reg-names < %s | FileCheck %s
 
 @A = common global fp128 0xL00000000000000000000000000000000, align 16
 @B = common global fp128 0xL00000000000000000000000000000000, align 16
 @C = common global fp128 0xL00000000000000000000000000000000, align 16
+@D = common global fp128 0xL00000000000000000000000000000000, align 16
 
-define fp128 @testSqrtOdd() {
+define fp128 @testSqrtOdd(fp128 %a) {
 entry:
-  %0 = load fp128, fp128* @A, align 16
-  %1 = call fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128 %0)
-  ret fp128 %1
+  %0 = call fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128 %a)
+  ret fp128 %0
 ; CHECK-LABEL: testSqrtOdd
-; CHECK: xssqrtqpo
+; CHECK: xssqrtqpo v2, v2
+; CHECK: blr
 }
 
 declare fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128)
 
-define fp128 @testFMAOdd() {
+define void @testFMAOdd(fp128 %a, fp128 %b, fp128 %c) {
 entry:
-  %0 = load fp128, fp128* @A, align 16
-  %1 = load fp128, fp128* @B, align 16
-  %2 = load fp128, fp128* @C, align 16
-  %3 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %0, fp128 %1, fp128 %2)
-  ret fp128 %3
+  %0 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
+  store fp128 %0, fp128* @A, align 16
+  %sub = fsub fp128 0xL00000000000000008000000000000000, %c
+  %1 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub)
+  store fp128 %1, fp128* @B, align 16
+  %2 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
+  %sub1 = fsub fp128 0xL00000000000000008000000000000000, %2
+  store fp128 %sub1, fp128* @C, align 16
+  %sub2 = fsub fp128 0xL00000000000000008000000000000000, %c
+  %3 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub2)
+  %sub3 = fsub fp128 0xL00000000000000008000000000000000, %3
+  store fp128 %sub3, fp128* @D, align 16
+  ret void
 ; CHECK-LABEL: testFMAOdd
-; CHECK: xsmaddqpo
+; CHECK-DAG: xsmaddqpo v{{[0-9]+}}, v2, v3
+; CHECK-DAG: xsmsubqpo v{{[0-9]+}}, v2, v3
+; CHECK-DAG: xsnmaddqpo v{{[0-9]+}}, v2, v3
+; CHECK-DAG: xsnmsubqpo v{{[0-9]+}}, v2, v3
+; CHECK: blr
 }
 
 declare fp128 @llvm.ppc.fmaf128.round.to.odd(fp128, fp128, fp128)
 
-define fp128 @testAddOdd() {
+define fp128 @testAddOdd(fp128 %a, fp128 %b) {
 entry:
-  %0 = load fp128, fp128* @A, align 16
-  %1 = load fp128, fp128* @B, align 16
-  %2 = call fp128 @llvm.ppc.addf128.round.to.odd(fp128 %0, fp128 %1)
-  ret fp128 %2
+  %0 = call fp128 @llvm.ppc.addf128.round.to.odd(fp128 %a, fp128 %b)
+  ret fp128 %0
 ; CHECK-LABEL: testAddOdd
-; CHECK: xsaddqpo
+; CHECK: xsaddqpo v2, v2, v3
+; CHECK: blr
 }
 
 declare fp128 @llvm.ppc.addf128.round.to.odd(fp128, fp128)
 
-define fp128 @testSubOdd() {
+define fp128 @testSubOdd(fp128 %a, fp128 %b) {
 entry:
-  %0 = load fp128, fp128* @A, align 16
-  %1 = load fp128, fp128* @B, align 16
-  %2 = call fp128 @llvm.ppc.subf128.round.to.odd(fp128 %0, fp128 %1)
-  ret fp128 %2
+  %0 = call fp128 @llvm.ppc.subf128.round.to.odd(fp128 %a, fp128 %b)
+  ret fp128 %0
 ; CHECK-LABEL: testSubOdd
-; CHECK: xssubqpo
+; CHECK: xssubqpo v2, v2, v3
+; CHECK: blr
 }
 
 ; Function Attrs: nounwind readnone
 declare fp128 @llvm.ppc.subf128.round.to.odd(fp128, fp128)
 
 ; Function Attrs: noinline nounwind optnone
-define fp128 @testMulOdd() {
+define fp128 @testMulOdd(fp128 %a, fp128 %b) {
 entry:
-  %0 = load fp128, fp128* @A, align 16
-  %1 = load fp128, fp128* @B, align 16
-  %2 = call fp128 @llvm.ppc.mulf128.round.to.odd(fp128 %0, fp128 %1)
-  ret fp128 %2
+  %0 = call fp128 @llvm.ppc.mulf128.round.to.odd(fp128 %a, fp128 %b)
+  ret fp128 %0
 ; CHECK-LABEL: testMulOdd
-; CHECK: xsmulqpo
+; CHECK: xsmulqpo v2, v2, v3
+; CHECK: blr
 }
 
 ; Function Attrs: nounwind readnone
 declare fp128 @llvm.ppc.mulf128.round.to.odd(fp128, fp128)
 
-define fp128 @testDivOdd() {
+define fp128 @testDivOdd(fp128 %a, fp128 %b) {
 entry:
-  %0 = load fp128, fp128* @A, align 16
-  %1 = load fp128, fp128* @B, align 16
-  %2 = call fp128 @llvm.ppc.divf128.round.to.odd(fp128 %0, fp128 %1)
-  ret fp128 %2
+  %0 = call fp128 @llvm.ppc.divf128.round.to.odd(fp128 %a, fp128 %b)
+  ret fp128 %0
 ; CHECK-LABEL: testDivOdd
-; CHECK: xsdivqpo
+; CHECK: xsdivqpo v2, v2, v3
+; CHECK: blr
 }
 
 declare fp128 @llvm.ppc.divf128.round.to.odd(fp128, fp128)
 
-define double @testTruncOdd() {
+define double @testTruncOdd(fp128 %a) {
 entry:
-  %0 = load fp128, fp128* @A, align 16
-  %1 = call double @llvm.ppc.truncf128.round.to.odd(fp128 %0)
-  ret double %1
-  ; CHECK-LABEL: testTruncOdd
-  ; CHECK: xscvqpdpo
+  %0 = call double @llvm.ppc.truncf128.round.to.odd(fp128 %a)
+  ret double %0
+; CHECK-LABEL: testTruncOdd
+; CHECK: xscvqpdpo v2, v2
+; CHECK: xxlor f1, v2, v2
+; CHECK: blr
 }
 
 declare double @llvm.ppc.truncf128.round.to.odd(fp128)
