[llvm] [SPIRV] Added Support for the constrained conversion intrinsics (PR #157437)
Subash B via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 9 05:17:39 PDT 2025
https://github.com/SubashBoopathi updated https://github.com/llvm/llvm-project/pull/157437
From 8c3294e06860e1b49a3aeea0852b4e7951d21dbf Mon Sep 17 00:00:00 2001
From: Subash B <subash.boopathi at multicorewareinc.com>
Date: Thu, 31 Jul 2025 14:48:49 +0530
Subject: [PATCH 1/2] Experimental Constrained Conversion intrinsics
---
llvm/include/llvm/Support/TargetOpcodes.def | 6 +
llvm/include/llvm/Target/GenericOpcodes.td | 6 +
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 12 ++
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 127 ++++++++++++++++++
llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp | 13 +-
.../llvm-intrinsics/constrained-convert.ll | 54 ++++++++
6 files changed, 217 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-convert.ll
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index b905576b61791..ea4eaff9cbd39 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -899,6 +899,12 @@ HANDLE_TARGET_OPCODE(G_STRICT_FREM)
HANDLE_TARGET_OPCODE(G_STRICT_FMA)
HANDLE_TARGET_OPCODE(G_STRICT_FSQRT)
HANDLE_TARGET_OPCODE(G_STRICT_FLDEXP)
+HANDLE_TARGET_OPCODE(G_STRICT_FPTOSI)
+HANDLE_TARGET_OPCODE(G_STRICT_SITOFP)
+HANDLE_TARGET_OPCODE(G_STRICT_UITOFP)
+HANDLE_TARGET_OPCODE(G_STRICT_FPTOUI)
+HANDLE_TARGET_OPCODE(G_STRICT_FPEXT)
+HANDLE_TARGET_OPCODE(G_STRICT_FPTRUNC)
/// read_register intrinsic
HANDLE_TARGET_OPCODE(G_READ_REGISTER)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index ce4750db88c9a..637eab4ad6c77 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1716,6 +1716,12 @@ def G_STRICT_FREM : ConstrainedInstruction<G_FREM>;
def G_STRICT_FMA : ConstrainedInstruction<G_FMA>;
def G_STRICT_FSQRT : ConstrainedInstruction<G_FSQRT>;
def G_STRICT_FLDEXP : ConstrainedInstruction<G_FLDEXP>;
+def G_STRICT_SITOFP: ConstrainedInstruction<G_SITOFP>;
+def G_STRICT_UITOFP: ConstrainedInstruction<G_UITOFP>;
+def G_STRICT_FPTOSI: ConstrainedInstruction<G_FPTOSI>;
+def G_STRICT_FPTOUI: ConstrainedInstruction<G_FPTOUI>;
+def G_STRICT_FPEXT: ConstrainedInstruction<G_FPEXT>;
+def G_STRICT_FPTRUNC: ConstrainedInstruction<G_FPTRUNC>;
//------------------------------------------------------------------------------
// Memory intrinsics
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index d7280eaba2440..00866db2a943e 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2061,6 +2061,18 @@ static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
return TargetOpcode::G_STRICT_FSQRT;
case Intrinsic::experimental_constrained_ldexp:
return TargetOpcode::G_STRICT_FLDEXP;
+ case Intrinsic::experimental_constrained_sitofp:
+ return TargetOpcode::G_STRICT_SITOFP;
+ case Intrinsic::experimental_constrained_uitofp:
+ return TargetOpcode::G_STRICT_UITOFP;
+ case Intrinsic::experimental_constrained_fptosi:
+ return TargetOpcode::G_STRICT_FPTOSI;
+ case Intrinsic::experimental_constrained_fptoui:
+ return TargetOpcode::G_STRICT_FPTOUI;
+ case Intrinsic::experimental_constrained_fpext:
+ return TargetOpcode::G_STRICT_FPEXT;
+ case Intrinsic::experimental_constrained_fptrunc:
+ return TargetOpcode::G_STRICT_FPTRUNC;
default:
return 0;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 6608b3f2cbefd..8576c0b9c26ac 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -224,6 +224,22 @@ class SPIRVInstructionSelector : public InstructionSelector {
bool IsSigned) const;
bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
bool IsSigned, unsigned Opcode) const;
+
+ bool selectStrictSIToF(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
+
+ bool selectStrictUIToF(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
+
+ bool selectStrictFPToS(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
+
+ bool selectStrictFPToU(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
+
+ bool selectStrictFPEXT(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
+
bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
bool IsSigned) const;
@@ -668,6 +684,18 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
case TargetOpcode::G_UITOFP:
return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);
+ case TargetOpcode::G_STRICT_SITOFP:
+ return selectStrictSIToF(ResVReg, ResType, I);
+ case TargetOpcode::G_STRICT_UITOFP:
+ return selectStrictUIToF(ResVReg, ResType, I);
+ case TargetOpcode::G_STRICT_FPTOSI:
+ return selectStrictFPToS(ResVReg, ResType, I);
+ case TargetOpcode::G_STRICT_FPTOUI:
+ return selectStrictFPToU(ResVReg, ResType, I);
+ case TargetOpcode::G_STRICT_FPEXT:
+ case TargetOpcode::G_STRICT_FPTRUNC:
+ return selectStrictFPEXT(ResVReg, ResType, I);
+
case TargetOpcode::G_CTPOP:
return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
case TargetOpcode::G_SMIN:
@@ -2574,6 +2602,105 @@ bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
.addUse(ZeroReg)
.constrainAllUses(TII, TRI, RBI);
}
+bool SPIRVInstructionSelector::selectStrictSIToF(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ // Convert a signed integer to a floating-point value using OpConvertSToF.
+ // If the source is a boolean, first convert it to an integer of matching bit
+ // width.
+ Register SrcReg = I.getOperand(1).getReg();
+ if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
+ unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
+ SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
+ if (ResType->getOpcode() == SPIRV::OpTypeVector) {
+ const unsigned NumElts = ResType->getOperand(2).getImm();
+ TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
+ }
+ SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
+ selectSelect(SrcReg, TmpType, I, false);
+ }
+ return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpConvertSToF);
+}
+
+bool SPIRVInstructionSelector::selectStrictUIToF(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ // Convert an unsigned integer to a floating-point value using OpConvertUToF.
+ // If the source is a boolean, first convert it to an integer of matching bit
+ // width.
+ Register SrcReg = I.getOperand(1).getReg();
+ if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
+ unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
+ SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
+ if (ResType->getOpcode() == SPIRV::OpTypeVector) {
+ const unsigned NumElts = ResType->getOperand(2).getImm();
+ TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
+ }
+ SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
+ selectSelect(SrcReg, TmpType, I, false);
+ }
+ return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpConvertUToF);
+}
+
+bool SPIRVInstructionSelector::selectStrictFPToS(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ // Convert a floating-point value to a signed integer using OpConvertFToS.
+ // If the source is a boolean, first convert it to a float of matching bit
+ // width.
+ Register SrcReg = I.getOperand(1).getReg();
+ if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
+ unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
+ SPIRVType *TmpType = GR.getOrCreateSPIRVFloatType(BitWidth, I, TII);
+ if (ResType->getOpcode() == SPIRV::OpTypeVector) {
+ const unsigned NumElts = ResType->getOperand(2).getImm();
+ TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
+ }
+ SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
+ selectSelect(SrcReg, TmpType, I, false);
+ }
+ return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpConvertFToS);
+}
+
+bool SPIRVInstructionSelector::selectStrictFPToU(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ // Convert a floating-point value to an unsigned integer using OpConvertFToU.
+ // If the source is a boolean, first convert it to a float of matching bit
+ // width.
+ Register SrcReg = I.getOperand(1).getReg();
+ if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
+ unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
+ SPIRVType *TmpType = GR.getOrCreateSPIRVFloatType(BitWidth, I, TII);
+ if (ResType->getOpcode() == SPIRV::OpTypeVector) {
+ const unsigned NumElts = ResType->getOperand(2).getImm();
+ TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
+ }
+ SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
+ selectSelect(SrcReg, TmpType, I, false);
+ }
+ return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpConvertFToU);
+}
+
+bool SPIRVInstructionSelector::selectStrictFPEXT(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ // Extend a floating-point value to a larger floating-point type using
+ // OpFConvert. If the source is a boolean, first convert it to a float of
+ // matching bit width.
+ Register SrcReg = I.getOperand(1).getReg();
+ if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
+ unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
+ SPIRVType *TmpType = GR.getOrCreateSPIRVFloatType(BitWidth, I, TII);
+ if (ResType->getOpcode() == SPIRV::OpTypeVector) {
+ const unsigned NumElts = ResType->getOperand(2).getImm();
+ TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
+ }
+ SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
+ selectSelect(SrcReg, TmpType, I, false);
+ }
+ return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpFConvert);
+}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
const SPIRVType *ResType,
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index 721f64a329d31..3fa56f9fec857 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -60,7 +60,7 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
const LLT v4s16 = LLT::fixed_vector(4, 16);
const LLT v4s8 = LLT::fixed_vector(4, 8);
const LLT v4s1 = LLT::fixed_vector(4, 1);
-
+
const LLT v3s64 = LLT::fixed_vector(3, 64);
const LLT v3s32 = LLT::fixed_vector(3, 32);
const LLT v3s16 = LLT::fixed_vector(3, 16);
@@ -203,6 +203,17 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
.legalForCartesianProduct(allIntScalarsAndVectors,
allFloatScalarsAndVectors);
+ getActionDefinitionsBuilder({G_STRICT_SITOFP, G_STRICT_UITOFP})
+ .legalForCartesianProduct(allFloatScalarsAndVectors,
+ allScalarsAndVectors);
+
+ getActionDefinitionsBuilder({G_STRICT_FPTOSI, G_STRICT_FPTOUI})
+ .legalForCartesianProduct(allIntScalarsAndVectors,
+ allFloatScalarsAndVectors);
+
+ getActionDefinitionsBuilder({G_STRICT_FPEXT, G_STRICT_FPTRUNC})
+ .legalForCartesianProduct(allFloatScalarsAndVectors);
+
getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
.legalForCartesianProduct(allFloatScalarsAndVectors,
allScalarsAndVectors);
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-convert.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-convert.ll
new file mode 100644
index 0000000000000..5619d99cd7073
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-convert.ll
@@ -0,0 +1,54 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#sf:]] "conv"
+; CHECK-DAG: OpName %[[#uf:]] "conv1"
+; CHECK-DAG: OpName %[[#fs:]] "conv2"
+; CHECK-DAG: OpName %[[#fu:]] "conv3"
+; CHECK-DAG: OpName %[[#fe:]] "conv4"
+; CHECK-DAG: OpName %[[#ft:]] "conv5"
+
+; CHECK-DAG: OpDecorate %[[#sf]] FPRoundingMode RTE
+; CHECK-DAG: OpDecorate %[[#uf]] FPRoundingMode RTZ
+; CHECK-DAG: OpDecorate %[[#ft]] FPRoundingMode RTP
+
+; CHECK-NOT: OpDecorate %[[#fs]] FPRoundingMode
+; CHECK-NOT: OpDecorate %[[#fu]] FPRoundingMode
+; CHECK-NOT: OpDecorate %[[#fe]] FPRoundingMode
+
+; CHECK: %[[#sf]] = OpConvertSToF
+; CHECK: %[[#uf]] = OpConvertUToF
+; CHECK: %[[#fs]] = OpConvertFToS
+; CHECK: %[[#fu]] = OpConvertFToU
+; CHECK: %[[#fe]] = OpFConvert
+; CHECK: %[[#ft]] = OpFConvert
+
+; Function Attrs: norecurse nounwind strictfp
+define dso_local spir_kernel void @test(float %a, i32 %in, i32 %ui) {
+entry:
+ %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %in, metadata !"round.tonearest", metadata !"fpexcept.strict") #2
+ %conv1 = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %ui, metadata !"round.towardzero", metadata !"fpexcept.ignore") #2
+ %conv2 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %conv1, metadata !"fpexcept.ignore") #2
+ %conv3 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %conv1, metadata !"fpexcept.ignore") #2
+ %conv4 = tail call double @llvm.experimental.constrained.fpext.f64.f32(float %conv1, metadata !"fpexcept.ignore") #2
+ %conv5 = tail call float @llvm.experimental.constrained.fptrunc.f32.f64(double %conv4, metadata !"round.upward", metadata !"fpexcept.ignore") #2
+ ret void
+}
+
+; Function Attrs: inaccessiblememonly nounwind willreturn
+declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) #1
+
+; Function Attrs: inaccessiblememonly nounwind willreturn
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata) #1
+
+; Function Attrs: inaccessiblememonly nounwind willreturn
+declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) #1
+
+; Function Attrs: inaccessiblememonly nounwind willreturn
+declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) #1
+
+; Function Attrs: inaccessiblememonly nounwind willreturn
+declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata) #1
+
+; Function Attrs: inaccessiblememonly nounwind willreturn
+declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata) #1
From f441f524637229816002e313710b16c72552bebe Mon Sep 17 00:00:00 2001
From: Subash B <subash.boopathi at multicorewareinc.com>
Date: Mon, 4 Aug 2025 16:01:25 +0530
Subject: [PATCH 2/2] Experimental Constrained Conversion intrinsics
---
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 2 +-
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 125 +---------
llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp | 4 +-
.../llvm-intrinsics/constrained-convert.ll | 221 ++++++++++++++----
4 files changed, 182 insertions(+), 170 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 00866db2a943e..2586b2520eca7 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2071,7 +2071,7 @@ static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
return TargetOpcode::G_STRICT_FPTOUI;
case Intrinsic::experimental_constrained_fpext:
return TargetOpcode::G_STRICT_FPEXT;
- case Intrinsic::experimental_constrained_fptrunc:
+ case Intrinsic::experimental_constrained_fptrunc:
return TargetOpcode::G_STRICT_FPTRUNC;
default:
return 0;
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 8576c0b9c26ac..536692b435e7a 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -224,22 +224,6 @@ class SPIRVInstructionSelector : public InstructionSelector {
bool IsSigned) const;
bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
bool IsSigned, unsigned Opcode) const;
-
- bool selectStrictSIToF(Register ResVReg, const SPIRVType *ResType,
- MachineInstr &I) const;
-
- bool selectStrictUIToF(Register ResVReg, const SPIRVType *ResType,
- MachineInstr &I) const;
-
- bool selectStrictFPToS(Register ResVReg, const SPIRVType *ResType,
- MachineInstr &I) const;
-
- bool selectStrictFPToU(Register ResVReg, const SPIRVType *ResType,
- MachineInstr &I) const;
-
- bool selectStrictFPEXT(Register ResVReg, const SPIRVType *ResType,
- MachineInstr &I) const;
-
bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
bool IsSigned) const;
@@ -685,16 +669,16 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);
case TargetOpcode::G_STRICT_SITOFP:
- return selectStrictSIToF(ResVReg, ResType, I);
+ return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertSToF);
case TargetOpcode::G_STRICT_UITOFP:
- return selectStrictUIToF(ResVReg, ResType, I);
+ return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToF);
case TargetOpcode::G_STRICT_FPTOSI:
- return selectStrictFPToS(ResVReg, ResType, I);
+ return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
case TargetOpcode::G_STRICT_FPTOUI:
- return selectStrictFPToU(ResVReg, ResType, I);
+ return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);
case TargetOpcode::G_STRICT_FPEXT:
case TargetOpcode::G_STRICT_FPTRUNC:
- return selectStrictFPEXT(ResVReg, ResType, I);
+ return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);
case TargetOpcode::G_CTPOP:
return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
@@ -2602,105 +2586,6 @@ bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
.addUse(ZeroReg)
.constrainAllUses(TII, TRI, RBI);
}
-bool SPIRVInstructionSelector::selectStrictSIToF(Register ResVReg,
- const SPIRVType *ResType,
- MachineInstr &I) const {
- // Convert a signed integer to a floating-point value using OpConvertSToF.
- // If the source is a boolean, first convert it to an integer of matching bit
- // width.
- Register SrcReg = I.getOperand(1).getReg();
- if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
- unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
- SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
- if (ResType->getOpcode() == SPIRV::OpTypeVector) {
- const unsigned NumElts = ResType->getOperand(2).getImm();
- TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
- }
- SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
- selectSelect(SrcReg, TmpType, I, false);
- }
- return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpConvertSToF);
-}
-
-bool SPIRVInstructionSelector::selectStrictUIToF(Register ResVReg,
- const SPIRVType *ResType,
- MachineInstr &I) const {
- // Convert an unsigned integer to a floating-point value using OpConvertUToF.
- // If the source is a boolean, first convert it to an integer of matching bit
- // width.
- Register SrcReg = I.getOperand(1).getReg();
- if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
- unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
- SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
- if (ResType->getOpcode() == SPIRV::OpTypeVector) {
- const unsigned NumElts = ResType->getOperand(2).getImm();
- TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
- }
- SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
- selectSelect(SrcReg, TmpType, I, false);
- }
- return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpConvertUToF);
-}
-
-bool SPIRVInstructionSelector::selectStrictFPToS(Register ResVReg,
- const SPIRVType *ResType,
- MachineInstr &I) const {
- // Convert a floating-point value to a signed integer using OpConvertFToS.
- // If the source is a boolean, first convert it to a float of matching bit
- // width.
- Register SrcReg = I.getOperand(1).getReg();
- if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
- unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
- SPIRVType *TmpType = GR.getOrCreateSPIRVFloatType(BitWidth, I, TII);
- if (ResType->getOpcode() == SPIRV::OpTypeVector) {
- const unsigned NumElts = ResType->getOperand(2).getImm();
- TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
- }
- SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
- selectSelect(SrcReg, TmpType, I, false);
- }
- return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpConvertFToS);
-}
-
-bool SPIRVInstructionSelector::selectStrictFPToU(Register ResVReg,
- const SPIRVType *ResType,
- MachineInstr &I) const {
- // Convert a floating-point value to an unsigned integer using OpConvertFToU.
- // If the source is a boolean, first convert it to a float of matching bit
- // width.
- Register SrcReg = I.getOperand(1).getReg();
- if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
- unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
- SPIRVType *TmpType = GR.getOrCreateSPIRVFloatType(BitWidth, I, TII);
- if (ResType->getOpcode() == SPIRV::OpTypeVector) {
- const unsigned NumElts = ResType->getOperand(2).getImm();
- TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
- }
- SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
- selectSelect(SrcReg, TmpType, I, false);
- }
- return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpConvertFToU);
-}
-
-bool SPIRVInstructionSelector::selectStrictFPEXT(Register ResVReg,
- const SPIRVType *ResType,
- MachineInstr &I) const {
- // Extend a floating-point value to a larger floating-point type using
- // OpFConvert. If the source is a boolean, first convert it to a float of
- // matching bit width.
- Register SrcReg = I.getOperand(1).getReg();
- if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool)) {
- unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
- SPIRVType *TmpType = GR.getOrCreateSPIRVFloatType(BitWidth, I, TII);
- if (ResType->getOpcode() == SPIRV::OpTypeVector) {
- const unsigned NumElts = ResType->getOperand(2).getImm();
- TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
- }
- SrcReg = createVirtualRegister(TmpType, &GR, MRI, MRI->getMF());
- selectSelect(SrcReg, TmpType, I, false);
- }
- return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, SPIRV::OpFConvert);
-}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
const SPIRVType *ResType,
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index 3fa56f9fec857..d8106f159fab5 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -60,7 +60,7 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
const LLT v4s16 = LLT::fixed_vector(4, 16);
const LLT v4s8 = LLT::fixed_vector(4, 8);
const LLT v4s1 = LLT::fixed_vector(4, 1);
-
+
const LLT v3s64 = LLT::fixed_vector(3, 64);
const LLT v3s32 = LLT::fixed_vector(3, 32);
const LLT v3s16 = LLT::fixed_vector(3, 16);
@@ -205,7 +205,7 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
getActionDefinitionsBuilder({G_STRICT_SITOFP, G_STRICT_UITOFP})
.legalForCartesianProduct(allFloatScalarsAndVectors,
- allScalarsAndVectors);
+ allIntScalarsAndVectors);
getActionDefinitionsBuilder({G_STRICT_FPTOSI, G_STRICT_FPTOUI})
.legalForCartesianProduct(allIntScalarsAndVectors,
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-convert.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-convert.ll
index 5619d99cd7073..d324408c665af 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-convert.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-convert.ll
@@ -1,54 +1,181 @@
-; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
-; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName %[[#sf:]] "conv"
-; CHECK-DAG: OpName %[[#uf:]] "conv1"
-; CHECK-DAG: OpName %[[#fs:]] "conv2"
-; CHECK-DAG: OpName %[[#fu:]] "conv3"
-; CHECK-DAG: OpName %[[#fe:]] "conv4"
-; CHECK-DAG: OpName %[[#ft:]] "conv5"
+; CHECK-DAG: OpName %[[#sf1:]] "conv1"
+; CHECK-DAG: OpName %[[#sf2:]] "conv2"
+; CHECK-DAG: OpName %[[#sf3:]] "conv3"
+; CHECK-DAG: OpName %[[#sf4:]] "conv4"
+; CHECK-DAG: OpName %[[#uf1:]] "conv5"
+; CHECK-DAG: OpName %[[#uf2:]] "conv6"
+; CHECK-DAG: OpName %[[#uf3:]] "conv7"
+; CHECK-DAG: OpName %[[#uf4:]] "conv8"
+; CHECK-DAG: OpName %[[#uf5:]] "conv9"
+; CHECK-DAG: OpName %[[#fs1:]] "conv10"
+; CHECK-DAG: OpName %[[#fs2:]] "conv11"
+; CHECK-DAG: OpName %[[#fs3:]] "conv12"
+; CHECK-DAG: OpName %[[#fs4:]] "conv13"
+; CHECK-DAG: OpName %[[#fs5:]] "conv14"
+; CHECK-DAG: OpName %[[#fu1:]] "conv15"
+; CHECK-DAG: OpName %[[#fu2:]] "conv16"
+; CHECK-DAG: OpName %[[#fu3:]] "conv17"
+; CHECK-DAG: OpName %[[#fu4:]] "conv18"
+; CHECK-DAG: OpName %[[#fu5:]] "conv19"
+; CHECK-DAG: OpName %[[#fe1:]] "conv20"
+; CHECK-DAG: OpName %[[#fe2:]] "conv21"
+; CHECK-DAG: OpName %[[#ft1:]] "conv22"
+; CHECK-DAG: OpName %[[#ft2:]] "conv23"
-; CHECK-DAG: OpDecorate %[[#sf]] FPRoundingMode RTE
-; CHECK-DAG: OpDecorate %[[#uf]] FPRoundingMode RTZ
-; CHECK-DAG: OpDecorate %[[#ft]] FPRoundingMode RTP
-
-; CHECK-NOT: OpDecorate %[[#fs]] FPRoundingMode
-; CHECK-NOT: OpDecorate %[[#fu]] FPRoundingMode
-; CHECK-NOT: OpDecorate %[[#fe]] FPRoundingMode
+; CHECK-DAG: OpConvertSToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertSToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertSToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertSToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertSToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertUToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertUToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertUToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertUToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertUToF %[[#]] %[[#]]
+; CHECK-DAG: OpConvertFToS %[[#]] %[[#]]
+; CHECK-DAG: OpConvertFToS %[[#]] %[[#]]
+; CHECK-DAG: OpConvertFToS %[[#]] %[[#]]
+; CHECK-DAG: OpConvertFToS %[[#]] %[[#]]
+; CHECK-DAG: OpConvertFToS %[[#]] %[[#]]
+; CHECK-DAG: OpFConvert %[[#]] %[[#]]
+; CHECK-DAG: OpFConvert %[[#]] %[[#]]
+; CHECK-DAG: OpFConvert %[[#]] %[[#]]
+; CHECK-DAG: OpFConvert %[[#]] %[[#]]
-; CHECK: %[[#sf]] = OpConvertSToF
-; CHECK: %[[#uf]] = OpConvertUToF
-; CHECK: %[[#fs]] = OpConvertFToS
-; CHECK: %[[#fu]] = OpConvertFToU
-; CHECK: %[[#fe]] = OpFConvert
-; CHECK: %[[#ft]] = OpFConvert
+; CHECK-DAG: OpDecorate %[[#sf]] FPRoundingMode RTE
+; CHECK-DAG: OpDecorate %[[#sf1]] FPRoundingMode RTZ
+; CHECK-DAG: OpDecorate %[[#sf2]] FPRoundingMode RTP
+; CHECK-DAG: OpDecorate %[[#sf3]] FPRoundingMode RTN
+; CHECK-DAG: OpDecorate %[[#sf4]] FPRoundingMode RTE
+; CHECK-DAG: OpDecorate %[[#uf1]] FPRoundingMode RTE
+; CHECK-DAG: OpDecorate %[[#uf2]] FPRoundingMode RTZ
+; CHECK-DAG: OpDecorate %[[#uf3]] FPRoundingMode RTP
+; CHECK-DAG: OpDecorate %[[#uf4]] FPRoundingMode RTN
+; CHECK-DAG: OpDecorate %[[#uf5]] FPRoundingMode RTE
+; CHECK-DAG: OpDecorate %[[#ft1]] FPRoundingMode RTZ
+; CHECK-DAG: OpDecorate %[[#ft2]] FPRoundingMode RTE
-; Function Attrs: norecurse nounwind strictfp
-define dso_local spir_kernel void @test(float %a, i32 %in, i32 %ui) {
+define dso_local spir_kernel void @test1(i32 %in) {
entry:
- %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %in, metadata !"round.tonearest", metadata !"fpexcept.strict") #2
- %conv1 = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %ui, metadata !"round.towardzero", metadata !"fpexcept.ignore") #2
- %conv2 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %conv1, metadata !"fpexcept.ignore") #2
- %conv3 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %conv1, metadata !"fpexcept.ignore") #2
- %conv4 = tail call double @llvm.experimental.constrained.fpext.f64.f32(float %conv1, metadata !"fpexcept.ignore") #2
- %conv5 = tail call float @llvm.experimental.constrained.fptrunc.f32.f64(double %conv4, metadata !"round.upward", metadata !"fpexcept.ignore") #2
- ret void
+ %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %in, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test2(i16 %in) {
+entry:
+ %conv1 = tail call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %in, metadata !"round.towardzero", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test3(i16 %in) {
+entry:
+ %conv2 = tail call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %in, metadata !"round.upward", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test4(i16 %in) {
+entry:
+ %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %in, metadata !"round.downward", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test5(<4 x i16> %in) {
+entry:
+ %conv4 = tail call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16> %in, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test6(i32 %in) {
+entry:
+ %conv5 = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %in, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test7(i32 %in) {
+entry:
+ %conv6 = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %in, metadata !"round.towardzero", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test8(i16 %in) {
+entry:
+ %conv7 = tail call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %in, metadata !"round.upward", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test9(i16 %in) {
+entry:
+ %conv8 = tail call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %in, metadata !"round.downward", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test10(<4 x i32> %in) {
+entry:
+ %conv9 = tail call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %in, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test11(float %in) {
+entry:
+ %conv10 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test12(double %in) {
+entry:
+ %conv11 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test13(float %in) {
+entry:
+ %conv12 = tail call i16 @llvm.experimental.constrained.fptosi.i16.f32(float %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test14(double %in) {
+entry:
+ %conv13 = tail call i16 @llvm.experimental.constrained.fptosi.i16.f64(double %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test15(<4 x double> %in) {
+entry:
+ %conv14 = tail call <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f64(<4 x double> %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test16(float %in) {
+entry:
+ %conv15 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test17(double %in) {
+entry:
+ %conv16 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test18(float %in) {
+entry:
+ %conv17 = tail call i16 @llvm.experimental.constrained.fptoui.i16.f32(float %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test19(double %in) {
+entry:
+ %conv18 = tail call i16 @llvm.experimental.constrained.fptoui.i16.f64(double %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test20( <4 x double> %in) {
+entry:
+ %conv19 = tail call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(<4 x double> %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test21(float %in) {
+entry:
+ %conv20 = tail call double @llvm.experimental.constrained.fpext.f64.f32(float %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test22(<4 x float> %in) {
+entry:
+ %conv21 = tail call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32( <4 x float> %in, metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test23(<4 x double> %in) {
+entry:
+ %conv22 = tail call <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64( <4 x double> %in,metadata !"round.towardzero", metadata !"fpexcept.strict")
+ ret void
+}
+define dso_local spir_kernel void @test24(double %in) {
+entry:
+ %conv23 = tail call float @llvm.experimental.constrained.fptrunc.f32.f64( double %in, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret void
}
-
-; Function Attrs: inaccessiblememonly nounwind willreturn
-declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) #1
-
-; Function Attrs: inaccessiblememonly nounwind willreturn
-declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata) #1
-
-; Function Attrs: inaccessiblememonly nounwind willreturn
-declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) #1
-
-; Function Attrs: inaccessiblememonly nounwind willreturn
-declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) #1
-
-; Function Attrs: inaccessiblememonly nounwind willreturn
-declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata) #1
-
-; Function Attrs: inaccessiblememonly nounwind willreturn
-declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata) #1
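For reference, a minimal sketch of the lowering this patch series implements. This is an illustration only, not part of the patches: the function name @example and its parameters %i and %d are made up, and the expected SPIR-V noted in the comments follows the CHECK lines of the tests above.

; Constrained conversions carry the rounding mode and exception behavior as
; metadata operands. The value conversion lowers to the matching SPIR-V
; conversion instruction, and the rounding-mode metadata is emitted as an
; FPRoundingMode decoration (round.tonearest -> RTE, round.towardzero -> RTZ,
; round.upward -> RTP, round.downward -> RTN).
define spir_kernel void @example(i32 %i, double %d) {
entry:
  ; expected: OpConvertSToF, decorated with FPRoundingMode RTE
  %f = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %i, metadata !"round.tonearest", metadata !"fpexcept.strict")
  ; expected: OpFConvert, decorated with FPRoundingMode RTZ
  %t = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %d, metadata !"round.towardzero", metadata !"fpexcept.strict")
  ; expected: OpConvertFToS with no FPRoundingMode decoration, since fptosi
  ; carries no rounding-mode operand
  %s = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %f, metadata !"fpexcept.strict")
  ret void
}

declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)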