[llvm] 4c3e51e - [AArch64] Handle 64bit vectors in tryCombineFixedPointConvert

David Green via llvm-commits llvm-commits at lists.llvm.org
Mon May 16 03:08:51 PDT 2022


Author: David Green
Date: 2022-05-16T11:08:47+01:00
New Revision: 4c3e51ecfa3337be2d091392d6174449aeb35aa3

URL: https://github.com/llvm/llvm-project/commit/4c3e51ecfa3337be2d091392d6174449aeb35aa3
DIFF: https://github.com/llvm/llvm-project/commit/4c3e51ecfa3337be2d091392d6174449aeb35aa3.diff

LOG: [AArch64] Handle 64bit vectors in tryCombineFixedPointConvert

Under some situations we can visit 64bit vector extract elements in
tryCombineFixedPointConvert, where an assert fires as they are expected
to have been converted to 128bit. Turn the assert into an if statement,
bailing out and letting the extract be handled first.

Also invert some ifs, using early exits to reduce indentation.

Fixes #55417

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 023e91f33e0f5..ae920d33e982b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15008,33 +15008,34 @@ static SDValue tryCombineFixedPointConvert(SDNode *N,
 
   // Check the operand and see if it originates from a lane extract.
   SDValue Op1 = N->getOperand(1);
-  if (Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
-    // Yep, no additional predication needed. Perform the transform.
-    SDValue IID = N->getOperand(0);
-    SDValue Shift = N->getOperand(2);
-    SDValue Vec = Op1.getOperand(0);
-    SDValue Lane = Op1.getOperand(1);
-    EVT ResTy = N->getValueType(0);
-    EVT VecResTy;
-    SDLoc DL(N);
+  if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+    return SDValue();
 
-    // The vector width should be 128 bits by the time we get here, even
-    // if it started as 64 bits (the extract_vector handling will have
-    // done so).
-    assert(Vec.getValueSizeInBits() == 128 &&
-           "unexpected vector size on extract_vector_elt!");
-    if (Vec.getValueType() == MVT::v4i32)
-      VecResTy = MVT::v4f32;
-    else if (Vec.getValueType() == MVT::v2i64)
-      VecResTy = MVT::v2f64;
-    else
-      llvm_unreachable("unexpected vector type!");
+  // Yep, no additional predication needed. Perform the transform.
+  SDValue IID = N->getOperand(0);
+  SDValue Shift = N->getOperand(2);
+  SDValue Vec = Op1.getOperand(0);
+  SDValue Lane = Op1.getOperand(1);
+  EVT ResTy = N->getValueType(0);
+  EVT VecResTy;
+  SDLoc DL(N);
 
-    SDValue Convert =
-        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift);
-    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane);
-  }
-  return SDValue();
+  // The vector width should be 128 bits by the time we get here, even
+  // if it started as 64 bits (the extract_vector handling will have
+  // done so). Bail if it is not.
+  if (Vec.getValueSizeInBits() != 128)
+    return SDValue();
+
+  if (Vec.getValueType() == MVT::v4i32)
+    VecResTy = MVT::v4f32;
+  else if (Vec.getValueType() == MVT::v2i64)
+    VecResTy = MVT::v2f64;
+  else
+    llvm_unreachable("unexpected vector type!");
+
+  SDValue Convert =
+      DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane);
 }
 
 // AArch64 high-vector "long" operations are formed by performing the non-high

diff --git a/llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll b/llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
index 34dd15b268d34..b068edc066760 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
 ; DAGCombine to transform a conversion of an extract_vector_elt to an
@@ -5,11 +6,42 @@
 ; of the value to a GPR and back to an FPR.
 ; rdar://11855286
 define double @foo0(<2 x i64> %a) nounwind {
-; CHECK:  scvtf.2d  [[REG:v[0-9]+]], v0, #9
-; CHECK-NEXT:  mov  d0, [[REG]][1]
+; CHECK-LABEL: foo0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf.2d v0, v0, #9
+; CHECK-NEXT:    mov d0, v0[1]
+; CHECK-NEXT:    ret
   %vecext = extractelement <2 x i64> %a, i32 1
   %fcvt_n = tail call double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64 %vecext, i32 9)
   ret double %fcvt_n
 }
 
-declare double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64, i32) nounwind readnone
+define double @bar(ptr %iVals, ptr %fVals, ptr %dVals) {
+; CHECK-LABEL: bar:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr d0, [x2, #128]
+; CHECK-NEXT:    frinti d0, d0
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    sri d0, d0, #1
+; CHECK-NEXT:    scvtf.2d v0, v0, #1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %arrayidx = getelementptr inbounds double, ptr %dVals, i64 16
+  %0 = load <1 x double>, ptr %arrayidx, align 8
+  %vrndi_v1.i = call <1 x double> @llvm.nearbyint.v1f64(<1 x double> %0)
+  %vget_lane = extractelement <1 x double> %vrndi_v1.i, i64 0
+  %vcvtd_s64_f64.i = call i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double %vget_lane)
+  %1 = insertelement <1 x i64> poison, i64 %vcvtd_s64_f64.i, i64 0
+  %vsrid_n_s647 = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %1, <1 x i64> %1, i32 1)
+  %2 = extractelement <1 x i64> %vsrid_n_s647, i64 0
+  %vcvtd_n_f64_s64 = call double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64 %2, i32 1)
+  ret double %vcvtd_n_f64_s64
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64>, <1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64, i32)
+declare <1 x double> @llvm.nearbyint.v1f64(<1 x double>)
+declare i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double)
+


        


More information about the llvm-commits mailing list