[llvm] [AArch64] Codegen for new SCVTF/UCVTF variants (FEAT_FPRCVT) (PR #123767)

Virginia Cangelosi via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 21 07:33:19 PST 2025


https://github.com/virginia-cangelosi created https://github.com/llvm/llvm-project/pull/123767

Adds patterns for the new SCVTF/UCVTF instructions to TableGen, with an associated .ll test file.

From a73efaf3850aa3c1addfe6db3fb6e13c6c3de808 Mon Sep 17 00:00:00 2001
From: Virginia Cangelosi <virginia.cangelosi at arm.com>
Date: Tue, 21 Jan 2025 11:02:13 +0000
Subject: [PATCH] [AArch64] Codegen for new SCVTF/UCVTF variants (FEAT_FPRCVT)

---
 .../lib/Target/AArch64/AArch64InstrFormats.td |  12 +-
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |   4 +-
 llvm/test/CodeGen/AArch64/fprcvt-cvtf.ll      | 159 ++++++++++++++++++
 3 files changed, 171 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/fprcvt-cvtf.ll

diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 6a3a9492e031c6..d2a1bcee00291b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -5487,7 +5487,7 @@ multiclass IntegerToFP<bits<2> rmode, bits<3> opcode, string asm, SDPatternOpera
   }
 }
 
-multiclass IntegerToFPSIMDScalar<bits<2> rmode, bits<3> opcode, string asm, SDPatternOperator node = null_frag> {
+multiclass IntegerToFPSIMDScalar<bits<2> rmode, bits<3> opcode, string asm, SDPatternOperator op, SDPatternOperator node = null_frag> {
   // 32-bit to half-precision
   def HSr: BaseIntegerToFPUnscaled<rmode, opcode, FPR32, FPR16, f16, asm, node> {
     let Inst{31} = 0; // 32-bit FPR flag
@@ -5511,6 +5511,15 @@ multiclass IntegerToFPSIMDScalar<bits<2> rmode, bits<3> opcode, string asm, SDPa
     let Inst{31} = 1; // 64-bit FPR flag
     let Inst{23-22} = 0b00; // 32-bit FPR flag
   }
+
+  def : Pat<(f16 (any_fpround (f32 (op (i32 FPR32:$Rn))))),
+          (!cast<Instruction>(NAME # HSr) $Rn)>;
+  def : Pat<(f64 (op (i32 (extractelt (v4i32 V128:$Rn), (i64 0))))),
+          (!cast<Instruction>(NAME # DSr) (EXTRACT_SUBREG $Rn, ssub))>;
+  def : Pat<(f16 (any_fpround (f32 (op (i64 FPR64:$Rn))))),
+          (!cast<Instruction>(NAME # HDr) $Rn)>;
+  def : Pat<(f32 (op (i64 (extractelt (v2i64 V128:$Rn), (i64 0))))),
+          (!cast<Instruction>(NAME # SDr) (EXTRACT_SUBREG $Rn, dsub))>;
 }
 
 //---
@@ -13270,4 +13279,3 @@ multiclass SIMDThreeSameVectorFP8MatrixMul<string asm>{
       let Predicates = [HasNEON, HasF8F32MM];
     }
 }
-
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 8e575abf83d449..e9d2fd2916f5ba 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5060,8 +5060,8 @@ defm SCVTF : IntegerToFP<0b00, 0b010, "scvtf", any_sint_to_fp>;
 defm UCVTF : IntegerToFP<0b00, 0b011, "ucvtf", any_uint_to_fp>;
 
 let Predicates = [HasNEON, HasFPRCVT] in {
-  defm SCVTF : IntegerToFPSIMDScalar<0b11, 0b100, "scvtf">;
-  defm UCVTF : IntegerToFPSIMDScalar<0b11, 0b101, "ucvtf">;
+  defm SCVTF : IntegerToFPSIMDScalar<0b11, 0b100, "scvtf", any_sint_to_fp>;
+  defm UCVTF : IntegerToFPSIMDScalar<0b11, 0b101, "ucvtf", any_uint_to_fp>;
 }
 
 def : Pat<(f16 (fdiv (f16 (any_sint_to_fp (i32 GPR32:$Rn))), fixedpoint_f16_i32:$scale)),
diff --git a/llvm/test/CodeGen/AArch64/fprcvt-cvtf.ll b/llvm/test/CodeGen/AArch64/fprcvt-cvtf.ll
new file mode 100644
index 00000000000000..75fc6b65f024d5
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fprcvt-cvtf.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mattr=+neon,+fprcvt -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mattr=+neon -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-NO-FPRCVT
+
+target triple = "aarch64-unknown-linux-gnu"
+
+
+; To exercise the new instructions, these tests need a scalar integer value already in a SIMD/FP register.
+; A common way this arises in practice is as the result of an integer reduction intrinsic.
+
+; SCVTF
+
+define half @scvtf_f16i32(<4 x i32> %x) {
+; CHECK-LABEL: scvtf_f16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    scvtf h0, s0
+; CHECK-NEXT:    ret
+;
+; CHECK-NO-FPRCVT-LABEL: scvtf_f16i32:
+; CHECK-NO-FPRCVT:       // %bb.0:
+; CHECK-NO-FPRCVT-NEXT:    addv s0, v0.4s
+; CHECK-NO-FPRCVT-NEXT:    scvtf s0, s0
+; CHECK-NO-FPRCVT-NEXT:    fcvt h0, s0
+; CHECK-NO-FPRCVT-NEXT:    ret
+ %addv = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %x)
+ %conv = sitofp i32 %addv to half
+ ret half %conv
+}
+
+define double @scvtf_f64i32(<4 x i32> %x) {
+; CHECK-LABEL: scvtf_f64i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    scvtf d0, s0
+; CHECK-NEXT:    ret
+;
+; CHECK-NO-FPRCVT-LABEL: scvtf_f64i32:
+; CHECK-NO-FPRCVT:       // %bb.0:
+; CHECK-NO-FPRCVT-NEXT:    addv s0, v0.4s
+; CHECK-NO-FPRCVT-NEXT:    fmov w8, s0
+; CHECK-NO-FPRCVT-NEXT:    scvtf d0, w8
+; CHECK-NO-FPRCVT-NEXT:    ret
+ %addv = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %x)
+ %conv = sitofp i32 %addv to double
+ ret double %conv
+}
+
+define half @scvtf_f16i64(<2 x i64> %x) {
+; CHECK-LABEL: scvtf_f16i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addp d0, v0.2d
+; CHECK-NEXT:    scvtf h0, d0
+; CHECK-NEXT:    ret
+;
+; CHECK-NO-FPRCVT-LABEL: scvtf_f16i64:
+; CHECK-NO-FPRCVT:       // %bb.0:
+; CHECK-NO-FPRCVT-NEXT:    addp d0, v0.2d
+; CHECK-NO-FPRCVT-NEXT:    fmov x8, d0
+; CHECK-NO-FPRCVT-NEXT:    scvtf s0, x8
+; CHECK-NO-FPRCVT-NEXT:    fcvt h0, s0
+; CHECK-NO-FPRCVT-NEXT:    ret
+ %addp = tail call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %x)
+ %conv = sitofp i64 %addp to half
+ ret half %conv
+}
+
+define float @scvtf_f32i64(<2 x i64> %x) {
+; CHECK-LABEL: scvtf_f32i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addp d0, v0.2d
+; CHECK-NEXT:    scvtf s0, d0
+; CHECK-NEXT:    ret
+;
+; CHECK-NO-FPRCVT-LABEL: scvtf_f32i64:
+; CHECK-NO-FPRCVT:       // %bb.0:
+; CHECK-NO-FPRCVT-NEXT:    addp d0, v0.2d
+; CHECK-NO-FPRCVT-NEXT:    fmov x8, d0
+; CHECK-NO-FPRCVT-NEXT:    scvtf s0, x8
+; CHECK-NO-FPRCVT-NEXT:    ret
+ %addp = tail call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %x)
+ %conv = sitofp i64 %addp to float
+ ret float %conv
+}
+
+; UCVTF
+
+define half @ucvtf_f16i32(<4 x i32> %x) {
+; CHECK-LABEL: ucvtf_f16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    ucvtf h0, s0
+; CHECK-NEXT:    ret
+;
+; CHECK-NO-FPRCVT-LABEL: ucvtf_f16i32:
+; CHECK-NO-FPRCVT:       // %bb.0:
+; CHECK-NO-FPRCVT-NEXT:    addv s0, v0.4s
+; CHECK-NO-FPRCVT-NEXT:    ucvtf s0, s0
+; CHECK-NO-FPRCVT-NEXT:    fcvt h0, s0
+; CHECK-NO-FPRCVT-NEXT:    ret
+ %addv = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %x)
+ %conv = uitofp i32 %addv to half
+ ret half %conv
+}
+
+define double @ucvtf_f64i32(<4 x i32> %x) {
+; CHECK-LABEL: ucvtf_f64i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    ucvtf d0, s0
+; CHECK-NEXT:    ret
+;
+; CHECK-NO-FPRCVT-LABEL: ucvtf_f64i32:
+; CHECK-NO-FPRCVT:       // %bb.0:
+; CHECK-NO-FPRCVT-NEXT:    addv s0, v0.4s
+; CHECK-NO-FPRCVT-NEXT:    fmov w8, s0
+; CHECK-NO-FPRCVT-NEXT:    ucvtf d0, w8
+; CHECK-NO-FPRCVT-NEXT:    ret
+ %addv = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %x)
+ %conv = uitofp i32 %addv to double
+ ret double %conv
+}
+
+define half @ucvtf_f16i64(<2 x i64> %x) {
+; CHECK-LABEL: ucvtf_f16i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addp d0, v0.2d
+; CHECK-NEXT:    ucvtf h0, d0
+; CHECK-NEXT:    ret
+;
+; CHECK-NO-FPRCVT-LABEL: ucvtf_f16i64:
+; CHECK-NO-FPRCVT:       // %bb.0:
+; CHECK-NO-FPRCVT-NEXT:    addp d0, v0.2d
+; CHECK-NO-FPRCVT-NEXT:    fmov x8, d0
+; CHECK-NO-FPRCVT-NEXT:    ucvtf s0, x8
+; CHECK-NO-FPRCVT-NEXT:    fcvt h0, s0
+; CHECK-NO-FPRCVT-NEXT:    ret
+ %addp = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %x)
+ %conv = uitofp i64 %addp to half
+ ret half %conv
+}
+
+define float @ucvtf_f32i64(<2 x i64> %x) {
+; CHECK-LABEL: ucvtf_f32i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addp d0, v0.2d
+; CHECK-NEXT:    ucvtf s0, d0
+; CHECK-NEXT:    ret
+;
+; CHECK-NO-FPRCVT-LABEL: ucvtf_f32i64:
+; CHECK-NO-FPRCVT:       // %bb.0:
+; CHECK-NO-FPRCVT-NEXT:    addp d0, v0.2d
+; CHECK-NO-FPRCVT-NEXT:    fmov x8, d0
+; CHECK-NO-FPRCVT-NEXT:    ucvtf s0, x8
+; CHECK-NO-FPRCVT-NEXT:    ret
+ %addp = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %x)
+ %conv = uitofp i64 %addp to float
+ ret float %conv
+}



More information about the llvm-commits mailing list