[llvm] 407a338 - [AArch64][SVE] Fix isel failure for FP-extending loads

Sander de Smalen via llvm-commits llvm-commits at lists.llvm.org
Mon May 10 03:56:35 PDT 2021


Author: Sander de Smalen
Date: 2021-05-10T11:27:38+01:00
New Revision: 407a33889de69c54bf4c0945f94a8417cf08e250

URL: https://github.com/llvm/llvm-project/commit/407a33889de69c54bf4c0945f94a8417cf08e250
DIFF: https://github.com/llvm/llvm-project/commit/407a33889de69c54bf4c0945f94a8417cf08e250.diff

LOG: [AArch64][SVE] Fix isel failure for FP-extending loads

DAGCombiner tries to combine a (fpext (load)) into a (fp_round (extload)),
but SVE has no FP-extending loads. By marking these load-extend actions
as Expand, the combine no longer happens.

This also fixes a similar issue for fptrunc, where the source type
is not a legal type.
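
For illustration, a minimal reproducer of the fpext case (a hedged
sketch; the function and value names are made up, and the committed
sve-fpext-load.ll test below covers the full set of type combinations):

    ; Before this patch, DAGCombiner rewrote the load+fpext as an
    ; FP-extending load, which SVE cannot select:
    define <vscale x 2 x double> @repro(<vscale x 2 x float>* %p) {
      %v = load <vscale x 2 x float>, <vscale x 2 x float>* %p
      %e = fpext <vscale x 2 x float> %v to <vscale x 2 x double>
      ret <vscale x 2 x double> %e
    }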

Reviewed By: bsmith, kmclaughlin

Differential Revision: https://reviews.llvm.org/D102053

Added: 
    llvm/test/CodeGen/AArch64/sve-fpext-load.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4b1e6bdcd690..6b1105d98e2a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1185,15 +1185,20 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       }
     }
 
-    for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
-                    MVT::nxv4f32, MVT::nxv2f64}) {
-      for (auto InnerVT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16,
-                           MVT::nxv2f32, MVT::nxv4f32, MVT::nxv2f64}) {
+    for (MVT VT : MVT::fp_scalable_vector_valuetypes()) {
+      for (MVT InnerVT : MVT::fp_scalable_vector_valuetypes()) {
         // Avoid marking truncating FP stores as legal to prevent the
         // DAGCombiner from creating unsupported truncating stores.
         setTruncStoreAction(VT, InnerVT, Expand);
+        // SVE does not have floating-point extending loads.
+        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
       }
+    }
 
+    for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
+                    MVT::nxv4f32, MVT::nxv2f64}) {
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::MGATHER, VT, Custom);
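
For context, DAGCombiner only forms the extending load when the target
reports it as Legal or Custom, so Expand is enough to suppress the
combine. A rough sketch of the guard in DAGCombiner::visitFP_EXTEND
(paraphrased for illustration, not the verbatim source of this
revision):

    // fold (fpext (load x)) -> (fp_round (extload x)), but only when
    // the target claims the FP-extending load is selectable:
    if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
        TLI.isLoadExtLegalOrCustom(ISD::EXTLOAD, VT, N0.getValueType())) {
      // ... create the EXTLOAD and rewrite other uses of the original
      // load as (fp_round extload) ...
    }

With the setLoadExtAction(..., Expand) calls above, that query now fails
for every pair of scalable FP vector types, so the plain load and fpext
survive to instruction selection, where they are matched as a predicated
load followed by fcvt, as the new test below shows.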

diff --git a/llvm/test/CodeGen/AArch64/sve-fpext-load.ll b/llvm/test/CodeGen/AArch64/sve-fpext-load.ll
new file mode 100644
index 000000000000..913230eebe8b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fpext-load.ll
@@ -0,0 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; fpext <vscale x 2 x half> -> <vscale x 2 x double>
+define <vscale x 2 x double> @ext2_f16_f64(<vscale x 2 x half> *%ptr, i64 %index) {
+; CHECK-LABEL: ext2_f16_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %load = load <vscale x 2 x half>, <vscale x 2 x half>* %ptr, align 4
+  %load.ext = fpext <vscale x 2 x half> %load to <vscale x 2 x double>
+  ret <vscale x 2 x double> %load.ext
+}
+
+; fpext <vscale x 4 x half> -> <vscale x 4 x double>
+define <vscale x 4 x double> @ext4_f16_f64(<vscale x 4 x half> *%ptr, i64 %index) {
+; CHECK-LABEL: ext4_f16_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    fcvt z0.d, p0/m, z1.h
+; CHECK-NEXT:    fcvt z1.d, p0/m, z2.h
+; CHECK-NEXT:    ret
+  %load = load <vscale x 4 x half>, <vscale x 4 x half>* %ptr, align 4
+  %load.ext = fpext <vscale x 4 x half> %load to <vscale x 4 x double>
+  ret <vscale x 4 x double> %load.ext
+}
+
+; fpext <vscale x 8 x half> -> <vscale x 8 x double>
+define <vscale x 8 x double> @ext8_f16_f64(<vscale x 8 x half> *%ptr, i64 %index) {
+; CHECK-LABEL: ext8_f16_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    uunpklo z2.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z3.d, z0.s
+; CHECK-NEXT:    uunpkhi z4.d, z0.s
+; CHECK-NEXT:    fcvt z0.d, p0/m, z2.h
+; CHECK-NEXT:    fcvt z1.d, p0/m, z1.h
+; CHECK-NEXT:    fcvt z2.d, p0/m, z3.h
+; CHECK-NEXT:    fcvt z3.d, p0/m, z4.h
+; CHECK-NEXT:    ret
+  %load = load <vscale x 8 x half>, <vscale x 8 x half>* %ptr, align 4
+  %load.ext = fpext <vscale x 8 x half> %load to <vscale x 8 x double>
+  ret <vscale x 8 x double> %load.ext
+}
+
+; fpext <vscale x 2 x float> -> <vscale x 2 x double>
+define <vscale x 2 x double> @ext2_f32_f64(<vscale x 2 x float> *%ptr, i64 %index) {
+; CHECK-LABEL: ext2_f32_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %load = load <vscale x 2 x float>, <vscale x 2 x float>* %ptr, align 4
+  %load.ext = fpext <vscale x 2 x float> %load to <vscale x 2 x double>
+  ret <vscale x 2 x double> %load.ext
+}
+
+; fpext <vscale x 4 x float> -> <vscale x 4 x double>
+define <vscale x 4 x double> @ext4_f32_f64(<vscale x 4 x float> *%ptr, i64 %index) {
+; CHECK-LABEL: ext4_f32_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    fcvt z0.d, p0/m, z1.s
+; CHECK-NEXT:    fcvt z1.d, p0/m, z2.s
+; CHECK-NEXT:    ret
+  %load = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 4
+  %load.ext = fpext <vscale x 4 x float> %load to <vscale x 4 x double>
+  ret <vscale x 4 x double> %load.ext
+}
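
As the header of this file notes, the CHECK lines are autogenerated. To
regenerate them after a codegen change, something like the following
should work (the build-tree path is an assumption, not part of this
commit):

    $ llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/AArch64/sve-fpext-load.ll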

diff --git a/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll b/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
index 41bca595a25e..8e96eefa1594 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
@@ -60,3 +60,28 @@ entry:
   store <vscale x 2 x half> %1, <vscale x 2 x half>* %dst, align 2
   ret void
 }
+
+define void @fptrunc8_f64_f16(<vscale x 8 x half> *%dst, <vscale x 8 x double> *%src) {
+; CHECK-LABEL: fptrunc8_f64_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x1, #2, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x1, #3, mul vl]
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
+; CHECK-NEXT:    fcvt z1.h, p0/m, z1.d
+; CHECK-NEXT:    fcvt z2.h, p0/m, z2.d
+; CHECK-NEXT:    fcvt z3.h, p0/m, z3.d
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load <vscale x 8 x double>, <vscale x 8 x double>* %src, align 8
+  %1 = fptrunc <vscale x 8 x double> %0 to <vscale x 8 x half>
+  store <vscale x 8 x half> %1, <vscale x 8 x half>* %dst, align 2
+  ret void
+}