[llvm] r359015 - [AArch64][GlobalISel] Legalize G_FMA for more vector types

Jessica Paquette via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 23 10:37:57 PDT 2019


Author: paquette
Date: Tue Apr 23 10:37:56 2019
New Revision: 359015

URL: http://llvm.org/viewvc/llvm-project?rev=359015&view=rev
Log:
[AArch64][GlobalISel] Legalize G_FMA for more vector types

Same as G_FCEIL, G_FABS, etc. Just move G_FMA into that rule.

Add a legalizer test for G_FMA, which we didn't have before, and update
arm64-vfloatintrinsics.ll.
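
For context, the rule that G_FMA is folded into is the one that scalarizes
fp16 vector operations when the subtarget lacks full FP16 support. The diff
below only shows the head of that rule; a rough, hedged sketch of the pattern
it follows is shown here (the predicate and mutation bodies are elided from
the diff, so the lambdas and the exact legalFor type list below are
illustrative rather than copied from the file; ST, s16, s32 and the vector
LLTs refer to names in the surrounding constructor):

    // Sketch only: lives inside AArch64LegalizerInfo's constructor.
    getActionDefinitionsBuilder(
        {G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR, G_FRINT, G_FMA})
        // Without full FP16, break fp16 vectors into scalar elements...
        .fewerElementsIf(
            [=, &ST](const LegalityQuery &Query) {
              const LLT &Ty = Query.Types[0];
              return Ty.isVector() && Ty.getElementType() == s16 &&
                     !ST.hasFullFP16();
            },
            [=](const LegalityQuery &Query) { return std::make_pair(0, s16); })
        // ...and widen the resulting s16 scalars to s32, which is why the
        // NO-FP16 checks in legalize-fma.mir show G_FPEXT/G_FMA/G_FPTRUNC
        // sequences per element.
        .widenScalarIf(
            [=, &ST](const LegalityQuery &Query) {
              return Query.Types[0] == s16 && !ST.hasFullFP16();
            },
            [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
        // With full FP16 (or wider element types), the vector forms stay
        // legal, so the FP16 checks keep a single vector G_FMA.
        .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v4s16, v8s16});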

Added:
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fma.mir
Modified:
    llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
    llvm/trunk/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp?rev=359015&r1=359014&r2=359015&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp Tue Apr 23 10:37:56 2019
@@ -125,12 +125,13 @@ AArch64LegalizerInfo::AArch64LegalizerIn
   getActionDefinitionsBuilder({G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO})
       .legalFor({{s32, s1}, {s64, s1}});
 
-  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMA, G_FMUL, G_FDIV, G_FNEG})
+  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FNEG})
     .legalFor({s32, s64, v2s64, v4s32, v2s32});
 
   getActionDefinitionsBuilder(G_FREM).libcallFor({s32, s64});
 
-  getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR, G_FRINT})
+  getActionDefinitionsBuilder(
+      {G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR, G_FRINT, G_FMA})
       // If we don't have full FP16 support, then scalarize the elements of
       // vectors containing fp16 types.
       .fewerElementsIf(

Added: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fma.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fma.mir?rev=359015&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fma.mir (added)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fma.mir Tue Apr 23 10:37:56 2019
@@ -0,0 +1,233 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc %s -verify-machineinstrs -mtriple=aarch64-unknown-unknown -run-pass=legalizer -simplify-mir -aarch64-neon-syntax=apple -mattr=-fullfp16 -o - | FileCheck %s --check-prefix=NO-FP16
+# RUN: llc %s -verify-machineinstrs -mtriple=aarch64-unknown-unknown -run-pass=legalizer -simplify-mir -aarch64-neon-syntax=apple -mattr=+fullfp16 -o - | FileCheck %s --check-prefix=FP16
+...
+---
+name:            test_v4f16.fma
+alignment:       2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $d0, $d1, $d2
+
+    ; NO-FP16-LABEL: name: test_v4f16.fma
+    ; NO-FP16: liveins: $d0, $d1, $d2
+    ; NO-FP16: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; NO-FP16: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $d1
+    ; NO-FP16: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $d2
+    ; NO-FP16: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; NO-FP16: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+    ; NO-FP16: [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
+    ; NO-FP16: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+    ; NO-FP16: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+    ; NO-FP16: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
+    ; NO-FP16: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
+    ; NO-FP16: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
+    ; NO-FP16: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+    ; NO-FP16: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+    ; NO-FP16: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV9]](s16)
+    ; NO-FP16: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
+    ; NO-FP16: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
+    ; NO-FP16: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+    ; NO-FP16: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+    ; NO-FP16: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV10]](s16)
+    ; NO-FP16: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FPEXT6]], [[FPEXT7]], [[FPEXT8]]
+    ; NO-FP16: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA2]](s32)
+    ; NO-FP16: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+    ; NO-FP16: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+    ; NO-FP16: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[UV11]](s16)
+    ; NO-FP16: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FPEXT9]], [[FPEXT10]], [[FPEXT11]]
+    ; NO-FP16: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA3]](s32)
+    ; NO-FP16: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16)
+    ; NO-FP16: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
+    ; NO-FP16: RET_ReallyLR implicit $d0
+    ; FP16-LABEL: name: test_v4f16.fma
+    ; FP16: liveins: $d0, $d1, $d2
+    ; FP16: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; FP16: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $d1
+    ; FP16: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $d2
+    ; FP16: [[FMA:%[0-9]+]]:_(<4 x s16>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; FP16: $d0 = COPY [[FMA]](<4 x s16>)
+    ; FP16: RET_ReallyLR implicit $d0
+    %0:_(<4 x s16>) = COPY $d0
+    %1:_(<4 x s16>) = COPY $d1
+    %2:_(<4 x s16>) = COPY $d2
+    %3:_(<4 x s16>) = G_FMA %0, %1, %2
+    $d0 = COPY %3(<4 x s16>)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name:            test_v8f16.fma
+alignment:       2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $q0, $q1, $q2
+
+    ; NO-FP16-LABEL: name: test_v8f16.fma
+    ; NO-FP16: liveins: $q0, $q1, $q2
+    ; NO-FP16: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; NO-FP16: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
+    ; NO-FP16: [[COPY2:%[0-9]+]]:_(<8 x s16>) = COPY $q2
+    ; NO-FP16: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
+    ; NO-FP16: [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16), [[UV14:%[0-9]+]]:_(s16), [[UV15:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<8 x s16>)
+    ; NO-FP16: [[UV16:%[0-9]+]]:_(s16), [[UV17:%[0-9]+]]:_(s16), [[UV18:%[0-9]+]]:_(s16), [[UV19:%[0-9]+]]:_(s16), [[UV20:%[0-9]+]]:_(s16), [[UV21:%[0-9]+]]:_(s16), [[UV22:%[0-9]+]]:_(s16), [[UV23:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY2]](<8 x s16>)
+    ; NO-FP16: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+    ; NO-FP16: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
+    ; NO-FP16: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV16]](s16)
+    ; NO-FP16: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
+    ; NO-FP16: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
+    ; NO-FP16: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+    ; NO-FP16: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV9]](s16)
+    ; NO-FP16: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV17]](s16)
+    ; NO-FP16: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
+    ; NO-FP16: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
+    ; NO-FP16: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+    ; NO-FP16: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV10]](s16)
+    ; NO-FP16: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV18]](s16)
+    ; NO-FP16: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FPEXT6]], [[FPEXT7]], [[FPEXT8]]
+    ; NO-FP16: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA2]](s32)
+    ; NO-FP16: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+    ; NO-FP16: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[UV11]](s16)
+    ; NO-FP16: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[UV19]](s16)
+    ; NO-FP16: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FPEXT9]], [[FPEXT10]], [[FPEXT11]]
+    ; NO-FP16: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA3]](s32)
+    ; NO-FP16: [[FPEXT12:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+    ; NO-FP16: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[UV12]](s16)
+    ; NO-FP16: [[FPEXT14:%[0-9]+]]:_(s32) = G_FPEXT [[UV20]](s16)
+    ; NO-FP16: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FPEXT12]], [[FPEXT13]], [[FPEXT14]]
+    ; NO-FP16: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA4]](s32)
+    ; NO-FP16: [[FPEXT15:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+    ; NO-FP16: [[FPEXT16:%[0-9]+]]:_(s32) = G_FPEXT [[UV13]](s16)
+    ; NO-FP16: [[FPEXT17:%[0-9]+]]:_(s32) = G_FPEXT [[UV21]](s16)
+    ; NO-FP16: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FPEXT15]], [[FPEXT16]], [[FPEXT17]]
+    ; NO-FP16: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA5]](s32)
+    ; NO-FP16: [[FPEXT18:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+    ; NO-FP16: [[FPEXT19:%[0-9]+]]:_(s32) = G_FPEXT [[UV14]](s16)
+    ; NO-FP16: [[FPEXT20:%[0-9]+]]:_(s32) = G_FPEXT [[UV22]](s16)
+    ; NO-FP16: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FPEXT18]], [[FPEXT19]], [[FPEXT20]]
+    ; NO-FP16: [[FPTRUNC6:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA6]](s32)
+    ; NO-FP16: [[FPEXT21:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+    ; NO-FP16: [[FPEXT22:%[0-9]+]]:_(s32) = G_FPEXT [[UV15]](s16)
+    ; NO-FP16: [[FPEXT23:%[0-9]+]]:_(s32) = G_FPEXT [[UV23]](s16)
+    ; NO-FP16: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FPEXT21]], [[FPEXT22]], [[FPEXT23]]
+    ; NO-FP16: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA7]](s32)
+    ; NO-FP16: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16), [[FPTRUNC4]](s16), [[FPTRUNC5]](s16), [[FPTRUNC6]](s16), [[FPTRUNC7]](s16)
+    ; NO-FP16: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
+    ; NO-FP16: RET_ReallyLR implicit $q0
+    ; FP16-LABEL: name: test_v8f16.fma
+    ; FP16: liveins: $q0, $q1, $q2
+    ; FP16: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; FP16: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
+    ; FP16: [[COPY2:%[0-9]+]]:_(<8 x s16>) = COPY $q2
+    ; FP16: [[FMA:%[0-9]+]]:_(<8 x s16>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; FP16: $q0 = COPY [[FMA]](<8 x s16>)
+    ; FP16: RET_ReallyLR implicit $q0
+    %0:_(<8 x s16>) = COPY $q0
+    %1:_(<8 x s16>) = COPY $q1
+    %2:_(<8 x s16>) = COPY $q2
+    %3:_(<8 x s16>) = G_FMA %0, %1, %2
+    $q0 = COPY %3(<8 x s16>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            test_v2f32.fma
+alignment:       2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $d0, $d1, $d2
+
+    ; NO-FP16-LABEL: name: test_v2f32.fma
+    ; NO-FP16: liveins: $d0, $d1, $d2
+    ; NO-FP16: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; NO-FP16: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; NO-FP16: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $d2
+    ; NO-FP16: [[FMA:%[0-9]+]]:_(<2 x s32>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; NO-FP16: $d0 = COPY [[FMA]](<2 x s32>)
+    ; NO-FP16: RET_ReallyLR implicit $d0
+    ; FP16-LABEL: name: test_v2f32.fma
+    ; FP16: liveins: $d0, $d1, $d2
+    ; FP16: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; FP16: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; FP16: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $d2
+    ; FP16: [[FMA:%[0-9]+]]:_(<2 x s32>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; FP16: $d0 = COPY [[FMA]](<2 x s32>)
+    ; FP16: RET_ReallyLR implicit $d0
+    %0:_(<2 x s32>) = COPY $d0
+    %1:_(<2 x s32>) = COPY $d1
+    %2:_(<2 x s32>) = COPY $d2
+    %3:_(<2 x s32>) = G_FMA %0, %1, %2
+    $d0 = COPY %3(<2 x s32>)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name:            test_v4f32.fma
+alignment:       2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $q0, $q1, $q2
+
+    ; NO-FP16-LABEL: name: test_v4f32.fma
+    ; NO-FP16: liveins: $q0, $q1, $q2
+    ; NO-FP16: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; NO-FP16: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; NO-FP16: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $q2
+    ; NO-FP16: [[FMA:%[0-9]+]]:_(<4 x s32>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; NO-FP16: $q0 = COPY [[FMA]](<4 x s32>)
+    ; NO-FP16: RET_ReallyLR implicit $q0
+    ; FP16-LABEL: name: test_v4f32.fma
+    ; FP16: liveins: $q0, $q1, $q2
+    ; FP16: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; FP16: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; FP16: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $q2
+    ; FP16: [[FMA:%[0-9]+]]:_(<4 x s32>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; FP16: $q0 = COPY [[FMA]](<4 x s32>)
+    ; FP16: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %2:_(<4 x s32>) = COPY $q2
+    %3:_(<4 x s32>) = G_FMA %0, %1, %2
+    $q0 = COPY %3(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            test_v2f64.fma
+alignment:       2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $q0, $q1, $q2
+
+    ; NO-FP16-LABEL: name: test_v2f64.fma
+    ; NO-FP16: liveins: $q0, $q1, $q2
+    ; NO-FP16: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; NO-FP16: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; NO-FP16: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
+    ; NO-FP16: [[FMA:%[0-9]+]]:_(<2 x s64>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; NO-FP16: $q0 = COPY [[FMA]](<2 x s64>)
+    ; NO-FP16: RET_ReallyLR implicit $q0
+    ; FP16-LABEL: name: test_v2f64.fma
+    ; FP16: liveins: $q0, $q1, $q2
+    ; FP16: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; FP16: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; FP16: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
+    ; FP16: [[FMA:%[0-9]+]]:_(<2 x s64>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; FP16: $q0 = COPY [[FMA]](<2 x s64>)
+    ; FP16: RET_ReallyLR implicit $q0
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(<2 x s64>) = COPY $q1
+    %2:_(<2 x s64>) = COPY $q2
+    %3:_(<2 x s64>) = G_FMA %0, %1, %2
+    $q0 = COPY %3(<2 x s64>)
+    RET_ReallyLR implicit $q0

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir?rev=359015&r1=359014&r2=359015&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir Tue Apr 23 10:37:56 2019
@@ -244,7 +244,7 @@
 # DEBUG:      .. the first uncovered type index: 1, OK
 #
 # DEBUG-NEXT: G_FMA (opcode {{[0-9]+}}): 1 type index
-# DEBUG:      .. the first uncovered type index: 1, OK
+# DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected
 #
 # DEBUG-NEXT: G_FDIV (opcode {{[0-9]+}}): 1 type index
 # DEBUG:      .. the first uncovered type index: 1, OK

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll?rev=359015&r1=359014&r2=359015&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll Tue Apr 23 10:37:56 2019
@@ -120,11 +120,17 @@ define %v4f16 @test_v4f16.log2(%v4f16 %a
   %1 = call %v4f16 @llvm.log2.v4f16(%v4f16 %a)
   ret %v4f16 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v4f16.fma
 define %v4f16 @test_v4f16.fma(%v4f16 %a, %v4f16 %b, %v4f16 %c) {
   ; CHECK-LABEL:          test_v4f16.fma:
   ; CHECK-NOFP16-COUNT-4: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
   ; CHECK-FP16-NOT:       fcvt
   ; CHECK-FP16:           fmla.4h
+  ; GISEL-LABEL:          test_v4f16.fma:
+  ; GISEL-NOFP16-COUNT-4: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+  ; GISEL-FP16-NOT:       fcvt
+  ; GISEL-FP16:           fmla.4h
   %1 = call %v4f16 @llvm.fma.v4f16(%v4f16 %a, %v4f16 %b, %v4f16 %c)
   ret %v4f16 %1
 }
@@ -352,11 +358,17 @@ define %v8f16 @test_v8f16.log2(%v8f16 %a
   %1 = call %v8f16 @llvm.log2.v8f16(%v8f16 %a)
   ret %v8f16 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v8f16.fma
 define %v8f16 @test_v8f16.fma(%v8f16 %a, %v8f16 %b, %v8f16 %c) {
   ; CHECK-LABEL:          test_v8f16.fma:
   ; CHECK-NOFP16-COUNT-8: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
   ; CHECK-FP16-NOT:       fcvt
   ; CHECK-FP16:           fmla.8h
+  ; GISEL-LABEL:          test_v8f16.fma:
+  ; GISEL-NOFP16-COUNT-8: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+  ; GISEL-FP16-NOT:       fcvt
+  ; GISEL-FP16:           fmla.8h
   %1 = call %v8f16 @llvm.fma.v8f16(%v8f16 %a, %v8f16 %b, %v8f16 %c)
   ret %v8f16 %1
 }
@@ -564,9 +576,13 @@ define %v2f32 @test_v2f32.log2(%v2f32 %a
   %1 = call %v2f32 @llvm.log2.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v2f32.fma
 ; CHECK-LABEL: test_v2f32.fma:
+; GISEL-LABEL: test_v2f32.fma:
 define %v2f32 @test_v2f32.fma(%v2f32 %a, %v2f32 %b, %v2f32 %c) {
   ; CHECK: fmla.2s
+  ; GISEL: fmla.2s
   %1 = call %v2f32 @llvm.fma.v2f32(%v2f32 %a, %v2f32 %b, %v2f32 %c)
   ret %v2f32 %1
 }
@@ -730,9 +746,13 @@ define %v4f32 @test_v4f32.log2(%v4f32 %a
   %1 = call %v4f32 @llvm.log2.v4f32(%v4f32 %a)
   ret %v4f32 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v4f32.fma
 ; CHECK: test_v4f32.fma:
+; GISEL: test_v4f32.fma:
 define %v4f32 @test_v4f32.fma(%v4f32 %a, %v4f32 %b, %v4f32 %c) {
   ; CHECK: fma
+  ; GISEL: fma
   %1 = call %v4f32 @llvm.fma.v4f32(%v4f32 %a, %v4f32 %b, %v4f32 %c)
   ret %v4f32 %1
 }
@@ -896,9 +916,13 @@ define %v2f64 @test_v2f64.log2(%v2f64 %a
   %1 = call %v2f64 @llvm.log2.v2f64(%v2f64 %a)
   ret %v2f64 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v2f64.fma
 ; CHECK: test_v2f64.fma:
+; GISEL: test_v2f64.fma:
 define %v2f64 @test_v2f64.fma(%v2f64 %a, %v2f64 %b, %v2f64 %c) {
   ; CHECK: fma
+  ; GISEL: fma
   %1 = call %v2f64 @llvm.fma.v2f64(%v2f64 %a, %v2f64 %b, %v2f64 %c)
   ret %v2f64 %1
 }



