[clang] abfbc55 - [FPEnv] clang should get from the AST the metadata for constrained FP builtins

Kevin P. Neal via cfe-commits cfe-commits at lists.llvm.org
Mon Nov 30 08:59:51 PST 2020


Author: Kevin P. Neal
Date: 2020-11-30T11:59:37-05:00
New Revision: abfbc5579bd4507ae286d4f29f8a157de0629372

URL: https://github.com/llvm/llvm-project/commit/abfbc5579bd4507ae286d4f29f8a157de0629372
DIFF: https://github.com/llvm/llvm-project/commit/abfbc5579bd4507ae286d4f29f8a157de0629372.diff

LOG: [FPEnv] clang should get from the AST the metadata for constrained FP builtins

Currently clang does not correctly retrieve the metadata for constrained FP
builtins from the AST. This patch fixes that for the non-target-specific
builtins.

Differential Revision: https://reviews.llvm.org/D92122
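
As a minimal illustration of the scenario the tests below exercise (the
function name here is made up for illustration, not taken from the patch):
the file is compiled with a relaxed global default such as
-ffp-exception-behavior=maytrap, but the pragma requests strict exception
semantics, so the constrained intrinsic emitted for the builtin must carry
the "fpexcept.strict" metadata recorded in the AST rather than the
command-line default.

    #pragma float_control(except, on)

    int check_nan(double d) {
      /* Expands to llvm.experimental.constrained.fcmp.f64 with metadata
         !"uno" and !"fpexcept.strict"; before this patch the call
         incorrectly used !"fpexcept.maytrap" from the command line. */
      return __builtin_isnan(d);
    }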

Added: 
    clang/test/CodeGen/strictfp_fpclassify.c

Modified: 
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/test/CodeGen/builtin_float_strictfp.c
    clang/test/CodeGen/constrained-math-builtins.c

Removed: 
    


################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 828d66f83de9..73897a27bd94 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -440,6 +440,7 @@ static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
 
   if (CGF.Builder.getIsFPConstrained()) {
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
     Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
     return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
   } else {
@@ -457,6 +458,7 @@ static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
   llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
 
   if (CGF.Builder.getIsFPConstrained()) {
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
     Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
     return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
   } else {
@@ -475,6 +477,7 @@ static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
   llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
 
   if (CGF.Builder.getIsFPConstrained()) {
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
     Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
     return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
   } else {
@@ -556,6 +559,7 @@ emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
 
   if (CGF.Builder.getIsFPConstrained()) {
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
     Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                        {ResultType, Src0->getType()});
     return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
@@ -2218,6 +2222,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     case Builtin::BI__builtin_fmodf16:
     case Builtin::BI__builtin_fmodl:
     case Builtin::BI__builtin_fmodf128: {
+      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
       Value *Arg1 = EmitScalarExpr(E->getArg(0));
       Value *Arg2 = EmitScalarExpr(E->getArg(1));
       return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
@@ -2828,6 +2833,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_isunordered: {
     // Ordered comparisons: we know the arguments to these are matching scalar
     // floating point values.
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
     Value *LHS = EmitScalarExpr(E->getArg(0));
     Value *RHS = EmitScalarExpr(E->getArg(1));
 
@@ -2856,6 +2863,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
   }
   case Builtin::BI__builtin_isnan: {
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
     Value *V = EmitScalarExpr(E->getArg(0));
     V = Builder.CreateFCmpUNO(V, V, "cmp");
     return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
@@ -2919,6 +2928,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     // isinf(x)    --> fabs(x) == infinity
     // isfinite(x) --> fabs(x) != infinity
     // x != NaN via the ordered compare in either case.
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
     Value *V = EmitScalarExpr(E->getArg(0));
     Value *Fabs = EmitFAbs(*this, V);
     Constant *Infinity = ConstantFP::getInfinity(V->getType());
@@ -2931,6 +2942,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
 
   case Builtin::BI__builtin_isinf_sign: {
     // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
     Value *Arg = EmitScalarExpr(E->getArg(0));
     Value *AbsArg = EmitFAbs(*this, Arg);
     Value *IsInf = Builder.CreateFCmpOEQ(
@@ -2948,6 +2961,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
 
   case Builtin::BI__builtin_isnormal: {
     // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
     Value *V = EmitScalarExpr(E->getArg(0));
     Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
 
@@ -2976,6 +2991,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   }
 
   case Builtin::BI__builtin_fpclassify: {
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
     Value *V = EmitScalarExpr(E->getArg(5));
     llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
 

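The same one-line fix is applied in each of the emit helpers above: push the
FP options recorded on the CallExpr with CGFPOptionsRAII before creating the
constrained intrinsic call. A simplified sketch of the unary helper after the
patch (the trailing parameters and the non-constrained branch are not shown in
the hunk above, so treat those details as illustrative):

    static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                     const CallExpr *E,
                                                     unsigned IntrinsicID,
                                                     unsigned ConstrainedIntrinsicID) {
      llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

      if (CGF.Builder.getIsFPConstrained()) {
        // Scope the rounding/exception settings attached to this call
        // expression instead of using the module-wide defaults.
        CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
        Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
        return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
      }

      Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
      return CGF.Builder.CreateCall(F, Src0);
    }
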
diff --git a/clang/test/CodeGen/builtin_float_strictfp.c b/clang/test/CodeGen/builtin_float_strictfp.c
index dbd59091b228..001d136c8bd6 100644
--- a/clang/test/CodeGen/builtin_float_strictfp.c
+++ b/clang/test/CodeGen/builtin_float_strictfp.c
@@ -5,7 +5,6 @@
 
 // Test that the constrained intrinsics are picking up the exception
 // metadata from the AST instead of the global default from the command line.
-// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
 
 #pragma float_control(except, on)
 
@@ -14,37 +13,37 @@ void test_half(__fp16 *H, __fp16 *H2) {
   (void)__builtin_isgreater(*H, *H2);
   // FP16: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
   // FP16: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
-  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.maytrap")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
   // CHECK-NEXT: zext i1
   (void)__builtin_isinf(*H);
-  // NOFP16: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.maytrap")
-  // FP16: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half 0xH7C00, metadata !"oeq", metadata !"fpexcept.maytrap")
+  // NOFP16: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict")
+  // FP16: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half 0xH7C00, metadata !"oeq", metadata !"fpexcept.strict")
 }
 
 // CHECK-LABEL: @test_mixed
 void test_mixed(double d1, float f2) {
   (void)__builtin_isgreater(d1, f2);
   // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
-  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ogt", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ogt", metadata !"fpexcept.strict")
   // CHECK-NEXT: zext i1
   (void)__builtin_isgreaterequal(d1, f2);
   // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
-  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"oge", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"oge", metadata !"fpexcept.strict")
   // CHECK-NEXT: zext i1
   (void)__builtin_isless(d1, f2);
   // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
-  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"olt", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"olt", metadata !"fpexcept.strict")
   // CHECK-NEXT: zext i1
   (void)__builtin_islessequal(d1, f2);
   // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
-  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ole", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ole", metadata !"fpexcept.strict")
   // CHECK-NEXT: zext i1
   (void)__builtin_islessgreater(d1, f2);
   // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
-  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"one", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"one", metadata !"fpexcept.strict")
   // CHECK-NEXT: zext i1
   (void)__builtin_isunordered(d1, f2);
   // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
-  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"uno", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"uno", metadata !"fpexcept.strict")
   // CHECK-NEXT: zext i1
 }

diff --git a/clang/test/CodeGen/constrained-math-builtins.c b/clang/test/CodeGen/constrained-math-builtins.c
index b1a656b2c25d..d817cce33679 100644
--- a/clang/test/CodeGen/constrained-math-builtins.c
+++ b/clang/test/CodeGen/constrained-math-builtins.c
@@ -10,171 +10,290 @@
 void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
   f = __builtin_fmod(f,f);    f = __builtin_fmodf(f,f);   f =  __builtin_fmodl(f,f); f = __builtin_fmodf128(f,f);
 
+// CHECK: call double @llvm.experimental.constrained.frem.f64(double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.frem.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.frem.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_pow(f,f);        __builtin_powf(f,f);       __builtin_powl(f,f); __builtin_powf128(f,f);
+
+// CHECK: call double @llvm.experimental.constrained.pow.f64(double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.pow.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.pow.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_powi(f,f);        __builtin_powif(f,f);       __builtin_powil(f,f);
+
+// CHECK: call double @llvm.experimental.constrained.powi.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.powi.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_ceil(f);       __builtin_ceilf(f);      __builtin_ceill(f); __builtin_ceilf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.ceil.f64(double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.ceil.f32(float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.ceil.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+
+  __builtin_cos(f);        __builtin_cosf(f);       __builtin_cosl(f); __builtin_cosf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.cos.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.cos.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.cos.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_exp(f);        __builtin_expf(f);       __builtin_expl(f); __builtin_expf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.exp.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.exp.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.exp.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_exp2(f);       __builtin_exp2f(f);      __builtin_exp2l(f); __builtin_exp2f128(f);
+
+// CHECK: call double @llvm.experimental.constrained.exp2.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.exp2.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.exp2.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_floor(f);      __builtin_floorf(f);     __builtin_floorl(f); __builtin_floorf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.floor.f64(double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.floor.f32(float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.floor.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+
+  __builtin_fma(f,f,f);        __builtin_fmaf(f,f,f);       __builtin_fmal(f,f,f); __builtin_fmaf128(f,f,f);
+
+// CHECK: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.fma.f128(fp128 %{{.*}}, fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_fmax(f,f);       __builtin_fmaxf(f,f);      __builtin_fmaxl(f,f); __builtin_fmaxf128(f,f);
+
+// CHECK: call double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
+
+  __builtin_fmin(f,f);       __builtin_fminf(f,f);      __builtin_fminl(f,f); __builtin_fminf128(f,f);
+
+// CHECK: call double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
+
+  __builtin_llrint(f);     __builtin_llrintf(f);    __builtin_llrintl(f); __builtin_llrintf128(f);
+
+// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_llround(f);    __builtin_llroundf(f);   __builtin_llroundl(f); __builtin_llroundf128(f);
+
+// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f64(double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f32(float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+
+  __builtin_log(f);        __builtin_logf(f);       __builtin_logl(f); __builtin_logf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.log.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.log.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.log.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_log10(f);      __builtin_log10f(f);     __builtin_log10l(f); __builtin_log10f128(f);
+
+// CHECK: call double @llvm.experimental.constrained.log10.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.log10.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.log10.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_log2(f);       __builtin_log2f(f);      __builtin_log2l(f); __builtin_log2f128(f);
+
+// CHECK: call double @llvm.experimental.constrained.log2.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.log2.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.log2.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_lrint(f);      __builtin_lrintf(f);     __builtin_lrintl(f); __builtin_lrintf128(f);
+
+// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_lround(f);     __builtin_lroundf(f);    __builtin_lroundl(f); __builtin_lroundf128(f);
+
+// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f64(double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f32(float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+
+  __builtin_nearbyint(f);  __builtin_nearbyintf(f); __builtin_nearbyintl(f); __builtin_nearbyintf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.nearbyint.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.nearbyint.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.nearbyint.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_rint(f);       __builtin_rintf(f);      __builtin_rintl(f); __builtin_rintf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.rint.f64(double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.rint.f32(float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.rint.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+
+  __builtin_round(f);      __builtin_roundf(f);     __builtin_roundl(f); __builtin_roundf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.round.f64(double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.round.f32(float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.round.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+
+  __builtin_sin(f);        __builtin_sinf(f);       __builtin_sinl(f); __builtin_sinf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.sin.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.sin.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.sin.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_sqrt(f);       __builtin_sqrtf(f);      __builtin_sqrtl(f); __builtin_sqrtf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+  __builtin_trunc(f);      __builtin_truncf(f);     __builtin_truncl(f); __builtin_truncf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.trunc.f64(double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.trunc.f32(float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+};
+
 // CHECK: declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80, x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.frem.f128(fp128, fp128, metadata, metadata)
 
-  __builtin_pow(f,f);        __builtin_powf(f,f);       __builtin_powl(f,f); __builtin_powf128(f,f);
-
 // CHECK: declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80, x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.pow.f128(fp128, fp128, metadata, metadata)
 
-  __builtin_powi(f,f);        __builtin_powif(f,f);       __builtin_powil(f,f);
-
 // CHECK: declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80, i32, metadata, metadata)
 
-  __builtin_ceil(f);       __builtin_ceilf(f);      __builtin_ceill(f); __builtin_ceilf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
 // CHECK: declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata)
 
-  __builtin_cos(f);        __builtin_cosf(f);       __builtin_cosl(f); __builtin_cosf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.cos.f128(fp128, metadata, metadata)
 
-  __builtin_exp(f);        __builtin_expf(f);       __builtin_expl(f); __builtin_expf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.exp.f128(fp128, metadata, metadata)
 
-  __builtin_exp2(f);       __builtin_exp2f(f);      __builtin_exp2l(f); __builtin_exp2f128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.exp2.f128(fp128, metadata, metadata)
 
-  __builtin_floor(f);      __builtin_floorf(f);     __builtin_floorl(f); __builtin_floorf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.floor.f64(double, metadata)
 // CHECK: declare float @llvm.experimental.constrained.floor.f32(float, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)
 
-  __builtin_fma(f,f,f);        __builtin_fmaf(f,f,f);       __builtin_fmal(f,f,f); __builtin_fmaf128(f,f,f);
-
 // CHECK: declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80, x86_fp80, x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.fma.f128(fp128, fp128, fp128, metadata, metadata)
 
-  __builtin_fmax(f,f);       __builtin_fmaxf(f,f);      __builtin_fmaxl(f,f); __builtin_fmaxf128(f,f);
-
 // CHECK: declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
 // CHECK: declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80, x86_fp80, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata)
 
-  __builtin_fmin(f,f);       __builtin_fminf(f,f);      __builtin_fminl(f,f); __builtin_fminf128(f,f);
-
 // CHECK: declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
 // CHECK: declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80, x86_fp80, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata)
 
-  __builtin_llrint(f);     __builtin_llrintf(f);    __builtin_llrintl(f); __builtin_llrintf128(f);
-
 // CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)
 
-  __builtin_llround(f);    __builtin_llroundf(f);   __builtin_llroundl(f); __builtin_llroundf128(f);
-
 // CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata)
 
-  __builtin_log(f);        __builtin_logf(f);       __builtin_logl(f); __builtin_logf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.log.f128(fp128, metadata, metadata)
 
-  __builtin_log10(f);      __builtin_log10f(f);     __builtin_log10l(f); __builtin_log10f128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.log10.f128(fp128, metadata, metadata)
 
-  __builtin_log2(f);       __builtin_log2f(f);      __builtin_log2l(f); __builtin_log2f128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.log2.f128(fp128, metadata, metadata)
 
-  __builtin_lrint(f);      __builtin_lrintf(f);     __builtin_lrintl(f); __builtin_lrintf128(f);
-
 // CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f64(double, metadata, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f32(float, metadata, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80, metadata, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f128(fp128, metadata, metadata)
 
-  __builtin_lround(f);     __builtin_lroundf(f);    __builtin_lroundl(f); __builtin_lroundf128(f);
-
 // CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f64(double, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f32(float, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80, metadata)
 // CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f128(fp128, metadata)
 
-  __builtin_nearbyint(f);  __builtin_nearbyintf(f); __builtin_nearbyintl(f); __builtin_nearbyintf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
 
-  __builtin_rint(f);       __builtin_rintf(f);      __builtin_rintl(f); __builtin_rintf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
 
-  __builtin_round(f);      __builtin_roundf(f);     __builtin_roundl(f); __builtin_roundf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.round.f64(double, metadata)
 // CHECK: declare float @llvm.experimental.constrained.round.f32(float, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)
 
-  __builtin_sin(f);        __builtin_sinf(f);       __builtin_sinl(f); __builtin_sinf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.sin.f128(fp128, metadata, metadata)
 
-  __builtin_sqrt(f);       __builtin_sqrtf(f);      __builtin_sqrtl(f); __builtin_sqrtf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
 // CHECK: declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)
 
-  __builtin_trunc(f);      __builtin_truncf(f);     __builtin_truncl(f); __builtin_truncf128(f);
-
 // CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 // CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
-};
 
 #pragma STDC FP_CONTRACT ON
 void bar(float f) {

diff --git a/clang/test/CodeGen/strictfp_fpclassify.c b/clang/test/CodeGen/strictfp_fpclassify.c
new file mode 100644
index 000000000000..404dba431687
--- /dev/null
+++ b/clang/test/CodeGen/strictfp_fpclassify.c
@@ -0,0 +1,130 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 %s -emit-llvm -ffp-exception-behavior=maytrap -o - -triple x86_64-unknown-unknown | FileCheck %s
+
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+// FIXME: these functions shouldn't trap on SNaN.
+
+#pragma float_control(except, on)
+
+int printf(const char *, ...);
+
+// CHECK-LABEL: @p(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[STR_ADDR:%.*]] = alloca i8*, align 8
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i8* [[STR:%.*]], i8** [[STR_ADDR]], align 8
+// CHECK-NEXT:    store i32 [[X:%.*]], i32* [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[STR_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[X_ADDR]], align 4
+// CHECK-NEXT:    [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) [[ATTR4:#.*]]
+// CHECK-NEXT:    ret void
+//
+void p(char *str, int x) {
+  printf("%s: %d\n", str, x);
+}
+
+#define P(n,args) p(#n #args, __builtin_##n args)
+
+// CHECK-LABEL: @test_fpclassify(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
+// CHECK:       fpclassify_end:
+// CHECK-NEXT:    [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @.str.1, i64 0, i64 0), i32 [[FPCLASSIFY_RESULT]]) [[ATTR4]]
+// CHECK-NEXT:    ret void
+// CHECK:       fpclassify_not_zero:
+// CHECK-NEXT:    [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
+// CHECK:       fpclassify_not_nan:
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5:#.*]]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
+// CHECK:       fpclassify_not_inf:
+// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
+// CHECK-NEXT:    br label [[FPCLASSIFY_END]]
+//
+void test_fpclassify(double d) {
+  P(fpclassify, (0, 1, 2, 3, 4, d));
+
+  return;
+}
+
+// CHECK-LABEL: @test_isinf(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5]]
+// CHECK-NEXT:    [[CMPINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[CMPINF]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.2, i64 0, i64 0), i32 [[TMP2]]) [[ATTR4]]
+// CHECK-NEXT:    ret void
+//
+void test_isinf(double d) {
+  P(isinf, (d));
+
+  return;
+}
+
+// CHECK-LABEL: @test_isinf_sign(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5]]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
+// CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
+// CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[ISINF]], i32 [[TMP4]], i32 0
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.3, i64 0, i64 0), i32 [[TMP5]]) [[ATTR4]]
+// CHECK-NEXT:    ret void
+//
+void test_isinf_sign(double d) {
+  P(isinf_sign, (d));
+
+  return;
+}
+
+// CHECK-LABEL: @test_isnan(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = zext i1 [[CMP]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.4, i64 0, i64 0), i32 [[TMP1]]) [[ATTR4]]
+// CHECK-NEXT:    ret void
+//
+void test_isnan(double d) {
+  P(isnan, (d));
+
+  return;
+}
+
+// CHECK-LABEL: @test_isnormal(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[ISEQ:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5]]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"ult", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[AND:%.*]] = and i1 [[ISEQ]], [[ISINF]]
+// CHECK-NEXT:    [[AND1:%.*]] = and i1 [[AND]], [[ISNORMAL]]
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[AND1]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.5, i64 0, i64 0), i32 [[TMP2]]) [[ATTR4]]
+// CHECK-NEXT:    ret void
+//
+void test_isnormal(double d) {
+  P(isnormal, (d));
+
+  return;
+}


        

