[llvm] FunctionAttrs: Basic propagation of nofpclass (PR #182444)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 19 22:52:44 PST 2026


https://github.com/arsenm created https://github.com/llvm/llvm-project/pull/182444

Perform caller->callee propagation of nofpclass on callsites. As
far as I can tell the only prior callsite to callee propagation here
was for norecurse. This doesn't handle transitive callers.

I was hoping to avoid doing this, and instead get attributor/attributor-light
enabled in the default pass pipeline. nofpclass propagation enabled by
default is the main blocker for eliminating the finite_only_opt global
check in device-libs, but this single level of propagation is most likely
sufficient for that use. Implementing this here is probably the most expedient
path to removing the control library.

>From a4107278677b24bc47dce2335369ba8bbb372fc8 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Thu, 5 Feb 2026 07:38:09 +0100
Subject: [PATCH] FunctionAttrs: Basic propagation of nofpclass

Perform caller->callee propagation of nofpclass on callsites. As
far as I can tell the only prior callsite to callee propagation here
was for norecurse. This doesn't handle transitive callers.

I was hoping to avoid doing this, and instead get attributor/attributor-light
enabled in the default pass pipeline. nofpclass propagation enabled by
default is the main blocker for eliminating the finite_only_opt global
check in device-libs, but this single level of propagation is most likely
sufficient for that use. Implementing this here is probably the most expedient
path to removing the control library.
---
 llvm/lib/Transforms/IPO/FunctionAttrs.cpp     |  53 ++-
 .../Transforms/FunctionAttrs/nofpclass.ll     | 317 ++++++++++++++++++
 2 files changed, 366 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/Transforms/FunctionAttrs/nofpclass.ll

diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 626d28d4c50d8..ffccce0b0f5eb 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -2376,12 +2376,13 @@ static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {
 }
 
 static bool addNoRecurseAttrsTopDown(Function &F) {
+  if (F.doesNotRecurse())
+    return false;
+
   // We check the preconditions for the function prior to calling this to avoid
   // the cost of building up a reversible post-order list. We assert them here
   // to make sure none of the invariants this relies on were violated.
   assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
-  assert(!F.doesNotRecurse() &&
-         "This function has already been deduced as norecurs!");
   assert(F.hasInternalLinkage() &&
          "Can only do top-down deduction for internal linkage functions!");
 
@@ -2404,6 +2405,48 @@ static bool addNoRecurseAttrsTopDown(Function &F) {
   return true;
 }
 
+static bool addNoFPClassAttrsTopDown(Function &F) {
+  assert(!F.isDeclaration() && "Cannot deduce nofpclass without a definition!");
+  unsigned NumArgs = F.arg_size();
+  SmallVector<FPClassTest, 8> ArgsNoFPClass(NumArgs, fcAllFlags);
+  FPClassTest RetNoFPClass = fcAllFlags;
+
+  bool Changed = false;
+  for (User *U : F.users()) {
+    auto *CB = dyn_cast<CallBase>(U);
+    if (!CB || CB->getCalledFunction() != &F)
+      return false;
+
+    RetNoFPClass &= CB->getRetNoFPClass();
+    for (unsigned I = 0; I != NumArgs; ++I)
+      ArgsNoFPClass[I] &= CB->getParamNoFPClass(I);
+  }
+
+  LLVMContext &Ctx = F.getContext();
+
+  if (RetNoFPClass != fcNone) {
+    FPClassTest OldAttr = F.getAttributes().getRetNoFPClass();
+    if (OldAttr != RetNoFPClass) {
+      F.addRetAttr(Attribute::getWithNoFPClass(Ctx, RetNoFPClass | OldAttr));
+      Changed = true;
+    }
+  }
+
+  for (unsigned I = 0; I != NumArgs; ++I) {
+    FPClassTest ArgNoFPClass = ArgsNoFPClass[I];
+    if (ArgNoFPClass == fcNone)
+      continue;
+    FPClassTest OldAttr = F.getParamNoFPClass(I);
+    if (OldAttr == ArgNoFPClass)
+      continue;
+
+    F.addParamAttr(I, Attribute::getWithNoFPClass(Ctx, ArgNoFPClass | OldAttr));
+    Changed = true;
+  }
+
+  return Changed;
+}
+
 static bool deduceFunctionAttributeInRPO(Module &M, LazyCallGraph &CG) {
   // We only have a post-order SCC traversal (because SCCs are inherently
   // discovered in post-order), so we accumulate them in a vector and then walk
@@ -2420,13 +2463,15 @@ static bool deduceFunctionAttributeInRPO(Module &M, LazyCallGraph &CG) {
       if (SCC.size() != 1)
         continue;
       Function &F = SCC.begin()->getFunction();
-      if (!F.isDeclaration() && !F.doesNotRecurse() && F.hasInternalLinkage())
+      if (!F.isDeclaration() && F.hasInternalLinkage())
         Worklist.push_back(&F);
     }
   }
   bool Changed = false;
-  for (auto *F : llvm::reverse(Worklist))
+  for (auto *F : llvm::reverse(Worklist)) {
     Changed |= addNoRecurseAttrsTopDown(*F);
+    Changed |= addNoFPClassAttrsTopDown(*F);
+  }
 
   return Changed;
 }
diff --git a/llvm/test/Transforms/FunctionAttrs/nofpclass.ll b/llvm/test/Transforms/FunctionAttrs/nofpclass.ll
new file mode 100644
index 0000000000000..893e59dad45fc
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/nofpclass.ll
@@ -0,0 +1,317 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 6
+; RUN: opt -S -passes='cgscc(function-attrs),rpo-function-attrs' < %s | FileCheck %s
+
+define float @return_f32_extern(ptr %ptr) {
+; CHECK-LABEL: define float @return_f32_extern(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[VAL:%.*]] = load volatile float, ptr [[PTR]], align 4
+; CHECK-NEXT:    ret float [[VAL]]
+;
+  %val = load volatile float, ptr %ptr
+  ret float %val
+}
+
+; Deduce nofpclass(inf) on return
+define internal float @only_noinf_ret_uses(ptr %ptr) {
+; CHECK-LABEL: define internal nofpclass(inf) float @only_noinf_ret_uses(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[VAL:%.*]] = load volatile float, ptr [[PTR]], align 4
+; CHECK-NEXT:    ret float [[VAL]]
+;
+  %val = load volatile float, ptr %ptr
+  ret float %val
+}
+
+define float @calls_no_inf_return(ptr %ptr) {
+; CHECK-LABEL: define float @calls_no_inf_return(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[CALL0:%.*]] = call float @return_f32_extern(ptr [[PTR]])
+; CHECK-NEXT:    [[CALL1:%.*]] = call nofpclass(inf) float @only_noinf_ret_uses(ptr [[PTR]])
+; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[CALL0]], [[CALL1]]
+; CHECK-NEXT:    ret float [[ADD]]
+;
+  %call0 = call float @return_f32_extern(ptr %ptr)
+  %call1 = call nofpclass(inf) float @only_noinf_ret_uses(ptr %ptr)
+  %add = fadd float %call0, %call1
+  ret float %add
+}
+
+; Deduce nofpclass(nan) on return, not inf or zero
+define internal float @merged_ret_uses(ptr %ptr) {
+; CHECK-LABEL: define internal nofpclass(nan) float @merged_ret_uses(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[VAL:%.*]] = load volatile float, ptr [[PTR]], align 4
+; CHECK-NEXT:    ret float [[VAL]]
+;
+  %val = load volatile float, ptr %ptr
+  ret float %val
+}
+
+define float @calls_merge_rets(ptr %ptr) {
+; CHECK-LABEL: define float @calls_merge_rets(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[CALL0:%.*]] = call nofpclass(nan inf) float @merged_ret_uses(ptr [[PTR]])
+; CHECK-NEXT:    [[CALL1:%.*]] = call nofpclass(nan zero) float @merged_ret_uses(ptr [[PTR]])
+; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[CALL0]], [[CALL1]]
+; CHECK-NEXT:    ret float [[ADD]]
+;
+  %call0 = call nofpclass(nan inf) float @merged_ret_uses(ptr %ptr)
+  %call1 = call nofpclass(nan zero) float @merged_ret_uses(ptr %ptr)
+  %add = fadd float %call0, %call1
+  ret float %add
+}
+
+; Do not infer nofpclass on return
+define internal float @called_with_wrong_ret_type(ptr %ptr) {
+; CHECK-LABEL: define internal float @called_with_wrong_ret_type(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[VAL:%.*]] = load volatile float, ptr [[PTR]], align 4
+; CHECK-NEXT:    ret float [[VAL]]
+;
+  %val = load volatile float, ptr %ptr
+  ret float %val
+}
+
+define <2 x half> @wrong_callee_ret_type(ptr %ptr) {
+; CHECK-LABEL: define <2 x half> @wrong_callee_ret_type(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT:    [[RET:%.*]] = call nofpclass(nan) <2 x half> @called_with_wrong_ret_type(ptr [[PTR]])
+; CHECK-NEXT:    ret <2 x half> [[RET]]
+;
+  %ret = call nofpclass(nan) <2 x half> @called_with_wrong_ret_type(ptr %ptr)
+  ret <2 x half> %ret
+}
+
+; Do not infer nofpclass on return
+define internal float @non_callee_use_ret(ptr %ptr) {
+; CHECK-LABEL: define internal float @non_callee_use_ret(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[VAL:%.*]] = load volatile float, ptr [[PTR]], align 4
+; CHECK-NEXT:    ret float [[VAL]]
+;
+  %val = load volatile float, ptr %ptr
+  ret float %val
+}
+
+declare float @uses_func_ptr(ptr)
+
+define float @caller_non_callee_use(ptr %ptr) {
+; CHECK-LABEL: define float @caller_non_callee_use(
+; CHECK-SAME: ptr readnone captures(none) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[RET:%.*]] = call nofpclass(nan) float @uses_func_ptr(ptr @non_callee_use_ret)
+; CHECK-NEXT:    ret float [[RET]]
+;
+  %ret = call nofpclass(nan) float @uses_func_ptr(ptr @non_callee_use_ret)
+  ret float %ret
+}
+
+define internal { double, double } @mixed_ret_and_args(float %scalar.arg, i32 %not.fp, <2 x double> %vec, { double, double } %struct) {
+; CHECK-LABEL: define internal nofpclass(sub) { double, double } @mixed_ret_and_args(
+; CHECK-SAME: float nofpclass(nan) [[SCALAR_ARG:%.*]], i32 [[NOT_FP:%.*]], <2 x double> nofpclass(inf) [[VEC:%.*]], { double, double } nofpclass(zero) [[STRUCT:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT:    [[FPEXT:%.*]] = fpext float [[SCALAR_ARG]] to double
+; CHECK-NEXT:    [[VEC_0:%.*]] = extractelement <2 x double> [[VEC]], i32 0
+; CHECK-NEXT:    [[INSERT_0:%.*]] = insertvalue { double, double } poison, double [[FPEXT]], 0
+; CHECK-NEXT:    [[INSERT_1:%.*]] = insertvalue { double, double } poison, double [[VEC_0]], 0
+; CHECK-NEXT:    ret { double, double } [[INSERT_1]]
+;
+  %fpext = fpext float %scalar.arg to double
+  %vec.0 = extractelement <2 x double> %vec, i32 0
+  %insert.0 = insertvalue { double, double } poison, double %fpext, 0
+  %insert.1 = insertvalue { double, double } poison, double %vec.0, 0
+  ret { double, double } %insert.1
+}
+
+define void @call_mixed_ret_and_args_0(float %a, i32 %b, <2 x double> %c, { double, double } %d) {
+; CHECK-LABEL: define void @call_mixed_ret_and_args_0(
+; CHECK-SAME: float [[A:%.*]], i32 [[B:%.*]], <2 x double> [[C:%.*]], { double, double } [[D:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT:    [[RESULT:%.*]] = call nofpclass(sub) { double, double } @mixed_ret_and_args(float nofpclass(nan) [[A]], i32 [[B]], <2 x double> nofpclass(inf) [[C]], { double, double } nofpclass(zero) [[D]])
+; CHECK-NEXT:    ret void
+;
+  %result = call nofpclass(sub) { double, double } @mixed_ret_and_args(float nofpclass(nan) %a, i32 %b, <2 x double> nofpclass(inf) %c, { double, double } nofpclass(zero) %d)
+  ret void
+}
+
+define internal float @merged_arg_uses(float %arg0, half %arg1, half %arg2) {
+; CHECK-LABEL: define internal float @merged_arg_uses(
+; CHECK-SAME: float nofpclass(nan) [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT:    [[FPEXT0:%.*]] = fpext half [[ARG1]] to float
+; CHECK-NEXT:    [[FPEXT1:%.*]] = fpext half [[ARG1]] to float
+; CHECK-NEXT:    [[ADD0:%.*]] = fadd float [[ARG0]], [[FPEXT0]]
+; CHECK-NEXT:    [[ADD1:%.*]] = fadd float [[ADD0]], [[FPEXT1]]
+; CHECK-NEXT:    ret float [[ADD1]]
+;
+  %fpext0 = fpext half %arg1 to float
+  %fpext1 = fpext half %arg1 to float
+  %add0 = fadd float %arg0, %fpext0
+  %add1 = fadd float %add0, %fpext1
+  ret float %add1
+}
+
+define float @calls_merged_arg_uses(float %arg0, half %arg1, half %arg2) {
+; CHECK-LABEL: define float @calls_merged_arg_uses(
+; CHECK-SAME: float [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT:    [[CALL0:%.*]] = call float @merged_arg_uses(float nofpclass(nan inf) [[ARG0]], half nofpclass(zero) [[ARG1]], half nofpclass(zero) [[ARG2]])
+; CHECK-NEXT:    [[CALL1:%.*]] = call float @merged_arg_uses(float nofpclass(nan) [[ARG0]], half nofpclass(sub) [[ARG1]], half [[ARG2]])
+; CHECK-NEXT:    [[RET:%.*]] = fadd float [[CALL0]], [[CALL1]]
+; CHECK-NEXT:    ret float [[RET]]
+;
+  %call0 = call float @merged_arg_uses(float nofpclass(inf nan) %arg0, half nofpclass(zero) %arg1, half nofpclass(zero) %arg2)
+  %call1 = call float @merged_arg_uses(float nofpclass(nan) %arg0, half nofpclass(sub) %arg1, half %arg2)
+  %ret = fadd float %call0, %call1
+  ret float %ret
+}
+
+define internal float @self_recursive_callsite_attrs(float %x) {
+; CHECK-LABEL: define internal float @self_recursive_callsite_attrs(
+; CHECK-SAME: float [[X:%.*]]) {
+; CHECK-NEXT:    [[RET:%.*]] = call nofpclass(inf) float @self_recursive_callsite_attrs(float nofpclass(nan) [[X]])
+; CHECK-NEXT:    ret float [[RET]]
+;
+  %ret = call nofpclass(inf) float @self_recursive_callsite_attrs(float nofpclass(nan) %x)
+  ret float %ret
+}
+
+define internal float @mutually_recursive0(float %arg) {
+; CHECK-LABEL: define internal float @mutually_recursive0(
+; CHECK-SAME: float [[ARG:%.*]]) {
+; CHECK-NEXT:    [[CALL:%.*]] = call nofpclass(nan) float @mutually_recursive1(float [[ARG]])
+; CHECK-NEXT:    ret float [[CALL]]
+;
+  %call = call nofpclass(nan) float @mutually_recursive1(float %arg)
+  ret float %call
+}
+
+define internal float @mutually_recursive1(float %arg) {
+; CHECK-LABEL: define internal float @mutually_recursive1(
+; CHECK-SAME: float [[ARG:%.*]]) {
+; CHECK-NEXT:    [[CALL:%.*]] = call float @mutually_recursive0(float [[ARG]])
+; CHECK-NEXT:    ret float [[CALL]]
+;
+  %call = call float @mutually_recursive0(float %arg)
+  ret float %call
+}
+
+define internal void @infer_arg_from_constants(float %a, <2 x half> %b, float %c, float %d) {
+; CHECK-LABEL: define internal void @infer_arg_from_constants(
+; CHECK-SAME: float [[A:%.*]], <2 x half> [[B:%.*]], float [[C:%.*]], float [[D:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT:    ret void
+;
+  ret void
+}
+
+define void @call_infer_arg_from_constants() {
+; CHECK-LABEL: define void @call_infer_arg_from_constants(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT:    call void @infer_arg_from_constants(float 0.000000e+00, <2 x half> <half 0xH3C00, half 0xHBC00>, float poison, float 0x7FF8000000000000)
+; CHECK-NEXT:    ret void
+;
+  call void @infer_arg_from_constants(float 0.0, <2 x half> <half 1.0, half -1.0>, float poison, float 0x7FF8000000000000)
+  ret void
+}
+
+define internal void @infer_arg_from_load(float %arg) {
+; CHECK-LABEL: define internal void @infer_arg_from_load(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT:    ret void
+;
+  ret void
+}
+
+define void @call_infer_arg_from_load(ptr %ptr) {
+; CHECK-LABEL: define void @call_infer_arg_from_load(
+; CHECK-SAME: ptr readonly captures(none) [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
+; CHECK-NEXT:    [[NOT_NAN:%.*]] = load float, ptr [[PTR]], align 4, !nofpclass [[META0:![0-9]+]]
+; CHECK-NEXT:    call void @infer_arg_from_load(float [[NOT_NAN]])
+; CHECK-NEXT:    ret void
+;
+  %not.nan = load float, ptr %ptr, !nofpclass !{i32 3}
+  call void @infer_arg_from_load(float %not.nan)
+  ret void
+}
+
+; Expand ret nofpclass(inf nan), arg to nofpclass(inf zero)
+define internal nofpclass(nan) float @refine_existing_nofpclass(float nofpclass(inf) %arg, ptr %ptr) {
+; CHECK-LABEL: define internal nofpclass(nan inf) float @refine_existing_nofpclass(
+; CHECK-SAME: float nofpclass(inf zero) [[ARG:%.*]], ptr readonly captures(none) [[PTR:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[PTR]], align 4
+; CHECK-NEXT:    ret float [[LOAD]]
+;
+  %load = load float, ptr %ptr
+  ret float %load
+}
+
+define float @caller_refine_existing_nofpclass(float %arg, ptr %ptr) {
+; CHECK-LABEL: define float @caller_refine_existing_nofpclass(
+; CHECK-SAME: float [[ARG:%.*]], ptr readonly captures(none) [[PTR:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT:    [[CALL:%.*]] = call nofpclass(inf) float @refine_existing_nofpclass(float nofpclass(zero) [[ARG]], ptr [[PTR]])
+; CHECK-NEXT:    ret float [[CALL]]
+;
+  %call = call nofpclass(inf) float @refine_existing_nofpclass(float nofpclass(zero) %arg, ptr %ptr)
+  ret float %call
+}
+
+; Do not infer nofpclass
+define internal float @nofpclass_non_call_user(float %arg, ptr %ptr) {
+; CHECK-LABEL: define internal float @nofpclass_non_call_user(
+; CHECK-SAME: float [[ARG:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[LOAD:%.*]] = load volatile float, ptr [[PTR]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[ARG]], [[LOAD]]
+; CHECK-NEXT:    ret float [[ADD]]
+;
+  %load = load volatile float, ptr %ptr
+  %add = fadd float %arg, %load
+  ret float %add
+}
+
+define float @call_nofpclass_non_call_user(float %arg, ptr %ptr, ptr %fptr.ptr) {
+; CHECK-LABEL: define float @call_nofpclass_non_call_user(
+; CHECK-SAME: float [[ARG:%.*]], ptr [[PTR:%.*]], ptr writeonly captures(none) initializes((0, 8)) [[FPTR_PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[RET:%.*]] = call nofpclass(nan) float @nofpclass_non_call_user(float nofpclass(nan) [[ARG]], ptr [[PTR]])
+; CHECK-NEXT:    store ptr @nofpclass_non_call_user, ptr [[FPTR_PTR]], align 8
+; CHECK-NEXT:    ret float [[RET]]
+;
+  %ret = call nofpclass(nan) float @nofpclass_non_call_user(float nofpclass(nan) %arg, ptr %ptr)
+  store ptr @nofpclass_non_call_user, ptr %fptr.ptr
+  ret float %ret
+}
+
+; TODO: This case is missed
+define internal float @transitive_nonan_callee0(float %arg, ptr %ptr) {
+; CHECK-LABEL: define internal float @transitive_nonan_callee0(
+; CHECK-SAME: float [[ARG:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[LOAD:%.*]] = load volatile float, ptr [[PTR]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[ARG]], [[LOAD]]
+; CHECK-NEXT:    ret float [[ADD]]
+;
+  %load = load volatile float, ptr %ptr
+  %add = fadd float %arg, %load
+  ret float %add
+}
+
+define internal float @transitive_nonan_callee1(float %arg, ptr %ptr) {
+; CHECK-LABEL: define internal nofpclass(nan) float @transitive_nonan_callee1(
+; CHECK-SAME: float nofpclass(nan) [[ARG:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[RET:%.*]] = call float @transitive_nonan_callee0(float [[ARG]], ptr [[PTR]])
+; CHECK-NEXT:    ret float [[RET]]
+;
+  %ret = call float @transitive_nonan_callee0(float %arg, ptr %ptr)
+  ret float %ret
+}
+
+define float @caller_transitive_nonan(float %arg, ptr %ptr) {
+; CHECK-LABEL: define float @caller_transitive_nonan(
+; CHECK-SAME: float [[ARG:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[RET:%.*]] = call nofpclass(nan) float @transitive_nonan_callee1(float nofpclass(nan) [[ARG]], ptr [[PTR]])
+; CHECK-NEXT:    ret float [[RET]]
+;
+  %ret = call nofpclass(nan) float @transitive_nonan_callee1(float nofpclass(nan) %arg, ptr %ptr)
+  ret float %ret
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
+; CHECK: attributes #[[ATTR1]] = { mustprogress nofree nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
+; CHECK: attributes #[[ATTR2]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR3]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: read) }
+;.
+; CHECK: [[META0]] = !{i32 3}
+;.



More information about the llvm-commits mailing list