[llvm] 4f78e02 - [AArch64] Lower scalar sqxtn intrinsics to use fp registers

David Green via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 21 02:46:50 PDT 2022


Author: David Green
Date: 2022-09-21T10:46:43+01:00
New Revision: 4f78e022ee3d437f8aefc4d7adb8abe7bb33b9ac

URL: https://github.com/llvm/llvm-project/commit/4f78e022ee3d437f8aefc4d7adb8abe7bb33b9ac
DIFF: https://github.com/llvm/llvm-project/commit/4f78e022ee3d437f8aefc4d7adb8abe7bb33b9ac.diff

LOG: [AArch64] Lower scalar sqxtn intrinsics to use fp registers

The llvm.aarch64.neon.scalar.sqxtn.i32.i64 intrinsics take and return
integer types, but operate on fp registers. This can create some
inefficiencies in their lowering, where the registers are converted to
fp a little too late. This patch adds lowering for the intrinsics,
creating bitcasts to/from fp types to allow nicer folding later when the
instructions are selected, especially around insert/extracts.

Differential Revision: https://reviews.llvm.org/D134024

Added: 
    

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64InstrFormats.td
    llvm/test/CodeGen/AArch64/arm64-arith-saturating.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index da67afd3ae95..264cab077d2e 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -105,7 +105,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
   class AdvSIMD_1VectorArg_Long_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
   class AdvSIMD_1IntArg_Narrow_Intrinsic
-    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
+    : DefaultAttrsIntrinsic<[llvm_any_ty], [llvm_any_ty], [IntrNoMem]>;
   class AdvSIMD_1VectorArg_Narrow_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
   class AdvSIMD_1VectorArg_Int_Across_Intrinsic

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c6639464f11e..e268eabef688 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4673,7 +4673,18 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::aarch64_neon_umin:
     return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2));
-
+  case Intrinsic::aarch64_neon_scalar_sqxtn:
+  case Intrinsic::aarch64_neon_scalar_sqxtun:
+  case Intrinsic::aarch64_neon_scalar_uqxtn: {
+    assert(Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::f32);
+    if (Op.getValueType() == MVT::i32)
+      return DAG.getNode(ISD::BITCAST, dl, MVT::i32,
+                         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::f32,
+                                     Op.getOperand(0),
+                                     DAG.getNode(ISD::BITCAST, dl, MVT::f64,
+                                                 Op.getOperand(1))));
+    return SDValue();
+  }
   case Intrinsic::aarch64_sve_sunpkhi:
     return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(),
                        Op.getOperand(1));

diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 455b874e7e69..da945ca7cb0c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -7207,7 +7207,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
 multiclass SIMDTwoScalarMixedBHS<bit U, bits<5> opc, string asm,
                                  SDPatternOperator OpNode = null_frag> {
   def v1i32  : BaseSIMDTwoScalar<U, 0b10, 0b00, opc, FPR32, FPR64, asm,
-        [(set (i32 FPR32:$Rd), (OpNode (i64 FPR64:$Rn)))]>;
+        [(set (f32 FPR32:$Rd), (OpNode (f64 FPR64:$Rn)))]>;
   def v1i16  : BaseSIMDTwoScalar<U, 0b01, 0b00, opc, FPR16, FPR32, asm, []>;
   def v1i8   : BaseSIMDTwoScalar<U, 0b00, 0b00, opc, FPR8 , FPR16, asm, []>;
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-arith-saturating.ll b/llvm/test/CodeGen/AArch64/arm64-arith-saturating.ll
index 56f24770659b..e2d530ab421e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-arith-saturating.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-arith-saturating.ll
@@ -193,8 +193,7 @@ define i32 @vqmovnd_u(<2 x i64> %b) nounwind readnone {
 define i32 @uqxtn_ext(<4 x i32> noundef %a, <4 x i32> noundef %b, i32 %c, float %d, <2 x i64> %e) {
 ; CHECK-LABEL: uqxtn_ext:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov x8, v3.d[1]
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    mov v0.d[0], v3.d[1]
 ; CHECK-NEXT:    uqxtn s0, d0
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
@@ -209,8 +208,7 @@ define <4 x i32> @sqxtn_ins(<4 x i32> noundef %a, i64 %c) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmov d1, x0
 ; CHECK-NEXT:    sqxtn s1, d1
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov v0.s[3], w8
+; CHECK-NEXT:    mov v0.s[3], v1.s[0]
 ; CHECK-NEXT:    ret
 entry:
   %vqmovnd_s64.i = tail call i32 @llvm.aarch64.neon.scalar.sqxtn.i32.i64(i64 %c)
@@ -221,11 +219,9 @@ entry:
 define <4 x i32> @sqxtun_insext(<4 x i32> noundef %a, <2 x i64> %e) {
 ; CHECK-LABEL: sqxtun_insext:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov x8, v1.d[1]
-; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    mov v1.d[0], v1.d[1]
 ; CHECK-NEXT:    sqxtun s1, d1
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov v0.s[3], w8
+; CHECK-NEXT:    mov v0.s[3], v1.s[0]
 ; CHECK-NEXT:    ret
 entry:
   %c = extractelement <2 x i64> %e, i64 1
@@ -239,8 +235,7 @@ define <4 x i32> @saddluse(<4 x i32> noundef %a, <4 x i32> noundef %b, i32 %c, f
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    saddlv d1, v1.4s
 ; CHECK-NEXT:    sqxtn s1, d1
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov v0.s[1], w8
+; CHECK-NEXT:    mov v0.s[1], v1.s[0]
 ; CHECK-NEXT:    ret
 entry:
   %vaddlvq_s32.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %b)


        


More information about the llvm-commits mailing list