[llvm] [SPIRV] Added support for the constrained arithmetic (fmuladd) intrinsic (PR #170270)
Subash B via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 2 19:52:28 PST 2025
https://github.com/SubashBoopathi updated https://github.com/llvm/llvm-project/pull/170270
>From 589c1b9bad79a67b6a550b7f377d2f26167602fb Mon Sep 17 00:00:00 2001
From: Subash B <subash.boopathi at multicorewareinc.com>
Date: Tue, 2 Dec 2025 15:22:35 +0530
Subject: [PATCH] [SPIRV] Add support for the constrained fmuladd intrinsic
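The constrained fmuladd intrinsic is lowered in SPIRVPrepareFunctions by
splitting it into an unconstrained fmul followed by a constrained fadd that
carries the original rounding mode; the new fadd picks up IRBuilder's default
constrained exception behavior (fpexcept.strict). Handled calls are collected
and erased only after the scan, so the instruction iterator stays valid.

As a rough before/after sketch of the IR (operand names are illustrative):

  %r = call float @llvm.experimental.constrained.fmuladd.f32(
           float %a, float %b, float %c,
           metadata !"round.tonearest", metadata !"fpexcept.strict")

becomes

  %r.mul = fmul float %a, %b
  %r.add = call float @llvm.experimental.constrained.fadd.f32(
               float %r.mul, float %c,
               metadata !"round.tonearest", metadata !"fpexcept.strict")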
---
.../Target/SPIRV/SPIRVPrepareFunctions.cpp | 27 ++++++
.../llvm-intrinsics/constrained-fmuladd.ll | 64 +++++++++++++++++++
2 files changed, 91 insertions(+)
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-fmuladd.ll
diff --git a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
index 2bffbf73b574a..13888123420ec 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
@@ -374,11 +374,32 @@ static bool toSpvOverloadedIntrinsic(IntrinsicInst *II, Intrinsic::ID NewID,
return true;
}
+static void
+lowerConstrainedFmuladd(IntrinsicInst *II,
+                        SmallVector<Instruction *> &EraseFromParent) {
+  auto *FPI = cast<ConstrainedFPIntrinsic>(II);
+  Value *A = FPI->getArgOperand(0);
+  Value *B = FPI->getArgOperand(1);
+  Value *C = FPI->getArgOperand(2);
+  IRBuilder<> Builder(II);
+  std::optional<RoundingMode> Rounding = FPI->getRoundingMode();
+  // Split a * b + c into a plain fmul followed by a constrained fadd that
+  // carries the intrinsic's rounding mode.
+  Value *Product = Builder.CreateFMul(A, B, II->getName() + ".mul");
+  Value *Result = Builder.CreateConstrainedFPBinOp(
+      Intrinsic::experimental_constrained_fadd, Product, C, {},
+      II->getName() + ".add", nullptr, Rounding);
+  II->replaceAllUsesWith(Result);
+  // Defer erasure; the caller is still iterating over the instructions.
+  EraseFromParent.push_back(II);
+}
+
// Substitutes calls to LLVM intrinsics with either calls to SPIR-V intrinsics
// or calls to proper generated functions. Returns True if F was modified.
bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) {
bool Changed = false;
const SPIRVSubtarget &STI = TM.getSubtarget<SPIRVSubtarget>(*F);
+  SmallVector<Instruction *> EraseFromParent;
for (BasicBlock &BB : *F) {
for (Instruction &I : BB) {
auto Call = dyn_cast<CallInst>(&I);
@@ -420,9 +441,15 @@ bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) {
lowerPtrAnnotation(II);
Changed = true;
break;
+      case Intrinsic::experimental_constrained_fmuladd:
+        lowerConstrainedFmuladd(II, EraseFromParent);
+        Changed = true;
+        break;
}
}
}
+  for (auto *I : EraseFromParent)
+    I->eraseFromParent();
return Changed;
}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-fmuladd.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-fmuladd.ll
new file mode 100644
index 0000000000000..340f2d78fc21b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-fmuladd.ll
@@ -0,0 +1,64 @@
+; RUN: llc -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpDecorate %[[#]] FPRoundingMode RTE
+; CHECK-DAG: OpDecorate %[[#]] FPRoundingMode RTZ
+; CHECK-DAG: OpDecorate %[[#]] FPRoundingMode RTP
+; CHECK-DAG: OpDecorate %[[#]] FPRoundingMode RTN
+; CHECK-DAG: OpDecorate %[[#]] FPRoundingMode RTE
+
+; CHECK: OpFMul %[[#]] %[[#]] %[[#]]
+; CHECK: OpFAdd %[[#]] %[[#]] %[[#]]
+define spir_kernel void @test_f32(float %a) {
+entry:
+  %r = tail call float @llvm.experimental.constrained.fmuladd.f32(
+      float %a, float %a, float %a,
+      metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret void
+}
+
+; CHECK: OpFMul %[[#]] %[[#]] %[[#]]
+; CHECK: OpFAdd %[[#]] %[[#]] %[[#]]
+define spir_kernel void @test_f64(double %a) {
+entry:
+  %r = tail call double @llvm.experimental.constrained.fmuladd.f64(
+      double %a, double %a, double %a,
+      metadata !"round.towardzero", metadata !"fpexcept.strict")
+  ret void
+}
+
+; CHECK: OpFMul %[[#]] %[[#]] %[[#]]
+; CHECK: OpFAdd %[[#]] %[[#]] %[[#]]
+define spir_kernel void @test_v2f32(<2 x float> %a) {
+entry:
+  %r = tail call <2 x float> @llvm.experimental.constrained.fmuladd.v2f32(
+      <2 x float> %a, <2 x float> %a, <2 x float> %a,
+      metadata !"round.upward", metadata !"fpexcept.strict")
+  ret void
+}
+
+; CHECK: OpFMul %[[#]] %[[#]] %[[#]]
+; CHECK: OpFAdd %[[#]] %[[#]] %[[#]]
+define spir_kernel void @test_v4f32(<4 x float> %a) {
+entry:
+  %r = tail call <4 x float> @llvm.experimental.constrained.fmuladd.v4f32(
+      <4 x float> %a, <4 x float> %a, <4 x float> %a,
+      metadata !"round.downward", metadata !"fpexcept.strict")
+  ret void
+}
+
+; CHECK: OpFMul %[[#]] %[[#]] %[[#]]
+; CHECK: OpFAdd %[[#]] %[[#]] %[[#]]
+define spir_kernel void @test_v2f64(<2 x double> %a) {
+entry:
+  %r = tail call <2 x double> @llvm.experimental.constrained.fmuladd.v2f64(
+      <2 x double> %a, <2 x double> %a, <2 x double> %a,
+      metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret void
+}
+
+declare float @llvm.experimental.constrained.fmuladd.f32(float, float, float, metadata, metadata)
+declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata)
+declare <2 x float> @llvm.experimental.constrained.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
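The new test mirrors its RUN lines and can be exercised standalone, e.g.:

  llc -mtriple=spirv64-unknown-unknown \
      llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-fmuladd.ll -o -

With spirv-tools available, the object output can additionally be validated
with spirv-val, as in the second RUN line.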