[clang] 91f886a - [FPEnv][TableGen] Add strictfp attribute to constrained intrinsics by default.

Kevin P. Neal via cfe-commits cfe-commits at lists.llvm.org
Wed Jul 12 06:56:00 PDT 2023


Author: Kevin P. Neal
Date: 2023-07-12T09:55:53-04:00
New Revision: 91f886a40d3fbabfc539c2bd8977a1ccb45aa450

URL: https://github.com/llvm/llvm-project/commit/91f886a40d3fbabfc539c2bd8977a1ccb45aa450
DIFF: https://github.com/llvm/llvm-project/commit/91f886a40d3fbabfc539c2bd8977a1ccb45aa450.diff

LOG: [FPEnv][TableGen] Add strictfp attribute to constrained intrinsics by default.

In D146869 @arsenm pointed out that the constrained intrinsics aren't
getting the strictfp attribute by default. They should be since they are
required to have it anyway.

TableGen did not know about this attribute until now. This patch adds
strictfp to TableGen, and it uses it on all of the constrained intrinsics.

Differential Revision: https://reviews.llvm.org/D154991

Added: 
    llvm/test/Assembler/fp-intrinsics-attr.ll

Modified: 
    clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
    llvm/include/llvm/IR/Intrinsics.td
    llvm/test/Verifier/fp-intrinsics-pass.ll
    llvm/utils/TableGen/CodeGenIntrinsics.cpp
    llvm/utils/TableGen/CodeGenIntrinsics.h
    llvm/utils/TableGen/IntrinsicEmitter.cpp

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
index e7433787c89c88..605790164a7898 100644
--- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
+++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
@@ -171,7 +171,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
 // STRICTFP: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind optnone strictfp "stack-protector-buffer-size"="8" "uniform-work-group-size"="false" }
 // STRICTFP: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
 // STRICTFP: attributes #[[ATTR2]] = { convergent noinline nounwind optnone strictfp "stack-protector-buffer-size"="8" }
-// STRICTFP: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
+// STRICTFP: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite) }
 // STRICTFP: attributes #[[ATTR4]] = { convergent nounwind "stack-protector-buffer-size"="8" }
 // STRICTFP: attributes #[[ATTR5]] = { strictfp }
 //.

diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 0779d1fd958f60..638a9fda29b15c 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1099,7 +1099,11 @@ def int_is_fpclass
 //===--------------- Constrained Floating Point Intrinsics ----------------===//
 //
 
-let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
+/// IntrStrictFP - The intrinsic is allowed to be used in an alternate
+/// floating point environment.
+def IntrStrictFP : IntrinsicProperty;
+
+let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn, IntrStrictFP] in {
   def int_experimental_constrained_fadd : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                     [ LLVMMatchType<0>,
                                                       LLVMMatchType<0>,

diff --git a/llvm/test/Assembler/fp-intrinsics-attr.ll b/llvm/test/Assembler/fp-intrinsics-attr.ll
new file mode 100644
index 00000000000000..6546d1a275c99f
--- /dev/null
+++ b/llvm/test/Assembler/fp-intrinsics-attr.ll
@@ -0,0 +1,318 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+; Test to verify that constrained intrinsics all have the strictfp attribute.
+; Ordering is from Intrinsics.td.
+
+define void @func(double %a, double %b, double %c, i32 %i) strictfp {
+; CHECK-LABEL: define void @func
+; CHECK-SAME: (double [[A:%.*]], double [[B:%.*]], double [[C:%.*]], i32 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
+
+  %add = call double @llvm.experimental.constrained.fadd.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %sub = call double @llvm.experimental.constrained.fsub.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %mul = call double @llvm.experimental.constrained.fmul.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %div = call double @llvm.experimental.constrained.fdiv.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %rem = call double @llvm.experimental.constrained.frem.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %fma = call double @llvm.experimental.constrained.fma.f64(
+                                               double %a, double %b, double %c,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %fmuladd = call double @llvm.experimental.constrained.fmuladd.f64(
+                                               double %a, double %b, double %c,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %si = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a,
+                                               metadata !"fpexcept.strict")
+
+  %ui = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a,
+                                               metadata !"fpexcept.strict")
+
+  %sfp = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %i,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %ufp = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %i,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %fptrunc = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %ext = call double @llvm.experimental.constrained.fpext.f64.f32(
+                                               float %fptrunc,
+                                               metadata !"fpexcept.strict")
+
+  %sqrt = call double @llvm.experimental.constrained.sqrt.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %powi = call double @llvm.experimental.constrained.powi.f64(
+                                               double %a, i32 %i,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %sin = call double @llvm.experimental.constrained.sin.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %cos = call double @llvm.experimental.constrained.cos.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %pow = call double @llvm.experimental.constrained.pow.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %log = call double @llvm.experimental.constrained.log.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %log10 = call double @llvm.experimental.constrained.log10.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %log2 = call double @llvm.experimental.constrained.log2.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %exp = call double @llvm.experimental.constrained.exp.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %exp2 = call double @llvm.experimental.constrained.exp2.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %rint = call double @llvm.experimental.constrained.rint.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %neari = call double @llvm.experimental.constrained.nearbyint.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %x32 = call i32 @llvm.experimental.constrained.lrint.i32.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %x64 = call i64 @llvm.experimental.constrained.llrint.i64.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %maxnum = call double @llvm.experimental.constrained.maxnum.f64(
+                                               double %a, double %b,
+                                               metadata !"fpexcept.strict")
+
+  %minnum = call double @llvm.experimental.constrained.minnum.f64(
+                                               double %a, double %b,
+                                               metadata !"fpexcept.strict")
+
+  %maxmum = call double @llvm.experimental.constrained.maximum.f64(
+                                               double %a, double %b,
+                                               metadata !"fpexcept.strict")
+
+  %minmum = call double @llvm.experimental.constrained.minimum.f64(
+                                               double %a, double %b,
+                                               metadata !"fpexcept.strict")
+
+  %ceil = call double @llvm.experimental.constrained.ceil.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %floor = call double @llvm.experimental.constrained.floor.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %y32 = call i32 @llvm.experimental.constrained.lround.i32.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %y64 = call i64 @llvm.experimental.constrained.llround.i64.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %round = call double @llvm.experimental.constrained.round.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %roundev = call double @llvm.experimental.constrained.roundeven.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %trunc = call double @llvm.experimental.constrained.trunc.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %q1 = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %a, double %b,
+                                               metadata !"oeq",
+                                               metadata !"fpexcept.strict")
+
+  %s1 = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %a, double %b,
+                                               metadata !"oeq",
+                                               metadata !"fpexcept.strict")
+
+; CHECK: ret void
+  ret void
+}
+
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fadd.f64({{.*}}) #[[ATTR1:[0-9]+]]
+
+declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fsub.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fmul.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fdiv.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.frem.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fma.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fmuladd.f64({{.*}}) #[[ATTR1]]
+
+declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.fptosi.i32.f64({{.*}}) #[[ATTR1]]
+
+declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.fptoui.i32.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.sitofp.f64.i32({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.uitofp.f64.i32({{.*}}) #[[ATTR1]]
+
+declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fptrunc.f32.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
+; CHECK: @llvm.experimental.constrained.fpext.f64.f32({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.sqrt.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.powi.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.sin.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.cos.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.pow.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.log.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.log10.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.log2.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.exp.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.exp2.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.rint.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.nearbyint.f64({{.*}}) #[[ATTR1]]
+
+declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.lrint.i32.f64({{.*}}) #[[ATTR1]]
+
+declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.llrint.i64.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
+; CHECK: @llvm.experimental.constrained.maxnum.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
+; CHECK: @llvm.experimental.constrained.minnum.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.maximum.f64(double, double, metadata)
+; CHECK: @llvm.experimental.constrained.maximum.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.minimum.f64(double, double, metadata)
+; CHECK: @llvm.experimental.constrained.minimum.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.ceil.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.floor.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.floor.f64({{.*}}) #[[ATTR1]]
+
+declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.lround.i32.f64({{.*}}) #[[ATTR1]]
+
+declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.llround.i64.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.round.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.round.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.roundeven.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.trunc.f64({{.*}}) #[[ATTR1]]
+
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fcmp.f64({{.*}}) #[[ATTR1]]
+
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fcmps.f64({{.*}}) #[[ATTR1]]
+
+; CHECK: attributes #[[ATTR0]] = {{{.*}} strictfp {{.*}}}
+; CHECK: attributes #[[ATTR1]] = { {{.*}} strictfp {{.*}} }
+

diff --git a/llvm/test/Verifier/fp-intrinsics-pass.ll b/llvm/test/Verifier/fp-intrinsics-pass.ll
index 45b0278e8330e9..1cc2cb70be76fd 100644
--- a/llvm/test/Verifier/fp-intrinsics-pass.ll
+++ b/llvm/test/Verifier/fp-intrinsics-pass.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -passes=verify -S < %s 2>&1 | FileCheck %s
 
-declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata) #0
-declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) #0
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
 
 ; Test that the verifier accepts legal code, and that the correct attributes are
 ; attached to the FP intrinsic. The attributes are checked at the bottom.
@@ -9,35 +9,34 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
 ; CHECK: declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) #[[ATTR]]
 ; Note: FP exceptions aren't usually caught through normal unwind mechanisms,
 ;       but we may want to revisit this for asynchronous exception handling.
-define double @f1(double %a, double %b) #0 {
+define double @f1(double %a, double %b) strictfp {
 ; CHECK-LABEL: define double @f1
-; CHECK-SAME: (double [[A:%.*]], double [[B:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-SAME: (double [[A:%.*]], double [[B:%.*]]) #[[STRICTFP:[0-9]+]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[FADD:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR1]]
+; CHECK-NEXT:    [[FADD:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.strict")
 ; CHECK-NEXT:    ret double [[FADD]]
 entry:
   %fadd = call double @llvm.experimental.constrained.fadd.f64(
                                                double %a, double %b,
                                                metadata !"round.dynamic",
-                                               metadata !"fpexcept.strict") #0
+                                               metadata !"fpexcept.strict")
   ret double %fadd
 }
 
-define double @f1u(double %a) #0 {
+define double @f1u(double %a) strictfp {
 ; CHECK-LABEL: define double @f1u
-; CHECK-SAME: (double [[A:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: (double [[A:%.*]]) #[[STRICTFP]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[FSQRT:%.*]] = call double @llvm.experimental.constrained.sqrt.f64(double [[A]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR1]]
+; CHECK-NEXT:    [[FSQRT:%.*]] = call double @llvm.experimental.constrained.sqrt.f64(double [[A]], metadata !"round.dynamic", metadata !"fpexcept.strict")
 ; CHECK-NEXT:    ret double [[FSQRT]]
 ;
 entry:
   %fsqrt = call double @llvm.experimental.constrained.sqrt.f64(
                                                double %a,
                                                metadata !"round.dynamic",
-                                               metadata !"fpexcept.strict") #0
+                                               metadata !"fpexcept.strict")
   ret double %fsqrt
 }
 
-attributes #0 = { strictfp }
-; TODO: Why is strictfp not in the below list?
-; CHECK: attributes #[[ATTR]] = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
+; CHECK: attributes #[[ATTR]] = { nocallback nofree nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite) }
+; CHECK: attributes #[[STRICTFP]] = { strictfp }

diff --git a/llvm/utils/TableGen/CodeGenIntrinsics.cpp b/llvm/utils/TableGen/CodeGenIntrinsics.cpp
index f2c2688f6bdf67..7cb86ad95266da 100644
--- a/llvm/utils/TableGen/CodeGenIntrinsics.cpp
+++ b/llvm/utils/TableGen/CodeGenIntrinsics.cpp
@@ -74,6 +74,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R,
   isConvergent = false;
   isSpeculatable = false;
   hasSideEffects = false;
+  isStrictFP = false;
 
   if (DefName.size() <= 4 || DefName.substr(0, 4) != "int_")
     PrintFatalError(DefLoc,
@@ -203,6 +204,8 @@ void CodeGenIntrinsic::setProperty(Record *R) {
     isSpeculatable = true;
   else if (R->getName() == "IntrHasSideEffects")
     hasSideEffects = true;
+  else if (R->getName() == "IntrStrictFP")
+    isStrictFP = true;
   else if (R->isSubClassOf("NoCapture")) {
     unsigned ArgNo = R->getValueAsInt("ArgNo");
     addArgAttribute(ArgNo, NoCapture);

diff --git a/llvm/utils/TableGen/CodeGenIntrinsics.h b/llvm/utils/TableGen/CodeGenIntrinsics.h
index c446e8102b69e8..f3452f5acea804 100644
--- a/llvm/utils/TableGen/CodeGenIntrinsics.h
+++ b/llvm/utils/TableGen/CodeGenIntrinsics.h
@@ -103,6 +103,9 @@ struct CodeGenIntrinsic {
   // True if the intrinsic is marked as speculatable.
   bool isSpeculatable;
 
+  // True if the intrinsic is marked as strictfp.
+  bool isStrictFP;
+
   enum ArgAttrKind {
     NoCapture,
     NoAlias,

diff --git a/llvm/utils/TableGen/IntrinsicEmitter.cpp b/llvm/utils/TableGen/IntrinsicEmitter.cpp
index 25564d0f2f22ba..09aad78536fee7 100644
--- a/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -388,6 +388,9 @@ std::optional<bool> compareFnAttributes(const CodeGenIntrinsic *L,
   if (L->hasSideEffects != R->hasSideEffects)
     return R->hasSideEffects;
 
+  if (L->isStrictFP != R->isStrictFP)
+    return R->isStrictFP;
+
   // Try to order by readonly/readnone attribute.
   uint32_t LK = L->ME.toIntValue();
   uint32_t RK = R->ME.toIntValue();
@@ -522,6 +525,8 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
       OS << "      Attribute::get(C, Attribute::Convergent),\n";
     if (Intrinsic.isSpeculatable)
       OS << "      Attribute::get(C, Attribute::Speculatable),\n";
+    if (Intrinsic.isStrictFP)
+      OS << "      Attribute::get(C, Attribute::StrictFP),\n";
 
     MemoryEffects ME = Intrinsic.ME;
     // TODO: IntrHasSideEffects should affect not only readnone intrinsics.
@@ -594,7 +599,8 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
         Intrinsic.isNoReturn || Intrinsic.isNoCallback || Intrinsic.isNoSync ||
         Intrinsic.isNoFree || Intrinsic.isWillReturn || Intrinsic.isCold ||
         Intrinsic.isNoDuplicate || Intrinsic.isNoMerge ||
-        Intrinsic.isConvergent || Intrinsic.isSpeculatable) {
+        Intrinsic.isConvergent || Intrinsic.isSpeculatable ||
+        Intrinsic.isStrictFP) {
       unsigned ID = UniqFnAttributes.find(&Intrinsic)->second;
       OS << "      AS[" << numAttrs++ << "] = {AttributeList::FunctionIndex, "
          << "getIntrinsicFnAttributeSet(C, " << ID << ")};\n";


        


More information about the cfe-commits mailing list