[PATCH] D48218: [Power9] Add remaining __float128 builtin support for FMA round to odd

Stefan Pintilie via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 15 07:00:51 PDT 2018


stefanp created this revision.
stefanp added reviewers: nemanjai, kbarton, hfinkel, syzaara, sfertile, lei.
stefanp added a dependency: D47550: [Power9] Add __float128 builtins for Round To Odd.

Implement this as it is done in GCC:

  __float128 a, b, c, d;
  a = __builtin_fmaf128_round_to_odd (b, c, d);                      // generates xsmaddqpo
  a = __builtin_fmaf128_round_to_odd (b, c, -d);                     // generates xsmsubqpo
  a = - __builtin_fmaf128_round_to_odd (b, c, d);                    // generates xsnmaddqpo
  a = - __builtin_fmaf128_round_to_odd (b, c, -d);                   // generates xsnmsubqpo
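
For context on how these forms get selected: at the IR level each negation shows up as an fsub from -0.0 applied to an operand of (or to the result of) the @llvm.ppc.fmaf128.round.to.odd intrinsic, and that is what the added (fneg ...) operands in the TableGen patterns below match. A minimal sketch of the xsmsubqpo case, with illustrative function/value names (the full set of forms is exercised in the updated test):

  define fp128 @msub_odd(fp128 %b, fp128 %c, fp128 %d) {
  entry:
    ; negate the addend; this fsub from -0.0 is matched as (fneg f128:$vTi)
    %negd = fsub fp128 0xL00000000000000008000000000000000, %d
    ; selects xsmsubqpo
    %res = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %b, fp128 %c, fp128 %negd)
    ret fp128 %res
  }

  declare fp128 @llvm.ppc.fmaf128.round.to.odd(fp128, fp128, fp128)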


https://reviews.llvm.org/D48218

Files:
  lib/Target/PowerPC/PPCInstrVSX.td
  test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll


Index: test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll
===================================================================
--- test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll
+++ test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll
@@ -4,6 +4,10 @@
 @A = common global fp128 0xL00000000000000000000000000000000, align 16
 @B = common global fp128 0xL00000000000000000000000000000000, align 16
 @C = common global fp128 0xL00000000000000000000000000000000, align 16
+@D = common global fp128 0xL00000000000000000000000000000000, align 16
+@E = common global fp128 0xL00000000000000000000000000000000, align 16
+@F = common global fp128 0xL00000000000000000000000000000000, align 16
+@G = common global fp128 0xL00000000000000000000000000000000, align 16
 
 define fp128 @testSqrtOdd() {
 entry:
@@ -16,15 +20,39 @@
 
 declare fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128)
 
-define fp128 @testFMAOdd() {
+define void @testFMAOdd() {
 entry:
   %0 = load fp128, fp128* @A, align 16
   %1 = load fp128, fp128* @B, align 16
   %2 = load fp128, fp128* @C, align 16
   %3 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %0, fp128 %1, fp128 %2)
-  ret fp128 %3
+  store fp128 %3, fp128* @D, align 16
+  %4 = load fp128, fp128* @A, align 16
+  %5 = load fp128, fp128* @B, align 16
+  %6 = load fp128, fp128* @C, align 16
+  %sub = fsub fp128 0xL00000000000000008000000000000000, %6
+  %7 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %4, fp128 %5, fp128 %sub)
+  store fp128 %7, fp128* @E, align 16
+  %8 = load fp128, fp128* @A, align 16
+  %9 = load fp128, fp128* @B, align 16
+  %10 = load fp128, fp128* @C, align 16
+  %11 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %8, fp128 %9, fp128 %10)
+  %sub1 = fsub fp128 0xL00000000000000008000000000000000, %11
+  store fp128 %sub1, fp128* @F, align 16
+  %12 = load fp128, fp128* @A, align 16
+  %13 = load fp128, fp128* @B, align 16
+  %14 = load fp128, fp128* @C, align 16
+  %sub2 = fsub fp128 0xL00000000000000008000000000000000, %14
+  %15 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %12, fp128 %13, fp128 %sub2)
+  %sub3 = fsub fp128 0xL00000000000000008000000000000000, %15
+  store fp128 %sub3, fp128* @G, align 16
+  ret void
 ; CHECK-LABEL: testFMAOdd
-; CHECK: xsmaddqpo
+; CHECK-DAG: xsmaddqpo
+; CHECK-DAG: xsmsubqpo
+; CHECK-DAG: xsnmaddqpo
+; CHECK-DAG: xsnmsubqpo
+; CHECK: blr
 }
 
 declare fp128 @llvm.ppc.fmaf128.round.to.odd(fp128, fp128, fp128)
Index: lib/Target/PowerPC/PPCInstrVSX.td
===================================================================
--- lib/Target/PowerPC/PPCInstrVSX.td
+++ lib/Target/PowerPC/PPCInstrVSX.td
@@ -2484,17 +2484,26 @@
                                        [(set f128:$vT,
                                              (fma f128:$vA, f128:$vB,
                                                   (fneg f128:$vTi)))]>;
-  def XSMSUBQPO : X_VT5_VA5_VB5_FMA_Ro<63, 420, "xsmsubqpo" , []>;
+  def XSMSUBQPO : X_VT5_VA5_VB5_FMA_Ro<63, 420, "xsmsubqpo" ,
+                                      [(set f128:$vT,
+                                      (int_ppc_fmaf128_round_to_odd
+                                      f128:$vA, f128:$vB, (fneg f128:$vTi)))]>;
   def XSNMADDQP : X_VT5_VA5_VB5_FMA <63, 452, "xsnmaddqp",
                                      [(set f128:$vT,
                                            (fneg (fma f128:$vA, f128:$vB,
                                                       f128:$vTi)))]>;
-  def XSNMADDQPO: X_VT5_VA5_VB5_FMA_Ro<63, 452, "xsnmaddqpo", []>;
+  def XSNMADDQPO: X_VT5_VA5_VB5_FMA_Ro<63, 452, "xsnmaddqpo",
+                                      [(set f128:$vT,
+                                      (fneg (int_ppc_fmaf128_round_to_odd
+                                      f128:$vA, f128:$vB, f128:$vTi)))]>;
   def XSNMSUBQP : X_VT5_VA5_VB5_FMA <63, 484, "xsnmsubqp",
                                      [(set f128:$vT,
                                            (fneg (fma f128:$vA, f128:$vB,
                                                       (fneg f128:$vTi))))]>;
-  def XSNMSUBQPO: X_VT5_VA5_VB5_FMA_Ro<63, 484, "xsnmsubqpo", []>;
+  def XSNMSUBQPO: X_VT5_VA5_VB5_FMA_Ro<63, 484, "xsnmsubqpo",
+                                      [(set f128:$vT,
+                                      (fneg (int_ppc_fmaf128_round_to_odd
+                                      f128:$vA, f128:$vB, (fneg f128:$vTi))))]>;
 
   // Additional fnmsub patterns: -a*c + b == -(a*c - b)
   def : Pat<(fma (fneg f128:$A), f128:$C, f128:$B), (XSNMSUBQP $B, $C, $A)>;

