[clang] [llvm] Clang: Add nsz to llvm.minnum and llvm.maxnum emitted from fmin and fmax (PR #113133)

YunQiang Su via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 16 02:34:33 PDT 2025


https://github.com/wzssyqa updated https://github.com/llvm/llvm-project/pull/113133

>From 074a74ee46133227ba880715416bf0481e773ecc Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Mon, 21 Oct 2024 15:18:38 +0800
Subject: [PATCH] Clang: emit llvm.minnum and llvm.maxnum with nsz always

See: https://github.com/llvm/llvm-project/pull/112852

We will define llvm.minnum and llvm.maxnum to treat +0.0 as greater than
-0.0 by default, even though libc does not require this ordering.

fix testcases

-ffp-exception-behavior=strict

add missing builtin test

test auto vectorize

fix test cases

update testcase

disable-llvm-passes

fix elementwise

fix some tests
---
 clang/lib/CodeGen/CGBuiltin.cpp               |   47 +-
 clang/test/CodeGen/RISCV/math-builtins.c      |   24 +-
 .../test/CodeGen/builtins-elementwise-math.c  |   24 +-
 clang/test/CodeGen/builtins.c                 |   12 +-
 .../test/CodeGen/constrained-math-builtins.c  |   16 +-
 clang/test/CodeGen/fmaxnum_fminnum_use_nsz.c  | 1129 +++++++++++++++++
 clang/test/CodeGen/math-builtins-long.c       |   16 +-
 .../CodeGen/strictfp-elementwise-builtins.cpp |    4 +-
 clang/test/CodeGenOpenCL/builtins-f16.cl      |    4 +-
 clang/test/Headers/__clang_hip_math.hip       |   64 +-
 .../amdgcn_openmp_device_math_constexpr.cpp   |    8 +-
 llvm/include/llvm/IR/IRBuilder.h              |   16 +-
 llvm/lib/IR/IRBuilder.cpp                     |    4 +-
 13 files changed, 1257 insertions(+), 111 deletions(-)
 create mode 100644 clang/test/CodeGen/fmaxnum_fminnum_use_nsz.c

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 1e4e055e04afd..1d26110a4dfbe 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -574,19 +574,20 @@ Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
 
 // Emit an intrinsic that has 2 operands of the same type as its result.
 // Depending on mode, this may be a constrained floating-point intrinsic.
-static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
-                                const CallExpr *E, unsigned IntrinsicID,
-                                unsigned ConstrainedIntrinsicID) {
+static Value *emitBinaryMaybeConstrainedFPBuiltin(
+    CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID,
+    unsigned ConstrainedIntrinsicID, llvm::FastMathFlags *FMF = nullptr) {
   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
   llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
 
   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
   if (CGF.Builder.getIsFPConstrained()) {
     Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
-    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
+    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1}, "",
+                                               std::nullopt, std::nullopt, FMF);
   } else {
     Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
-    return CGF.Builder.CreateCall(F, { Src0, Src1 });
+    return CGF.Builder.CreateCall(F, {Src0, Src1}, "", nullptr, FMF);
   }
 }
 
@@ -2612,10 +2613,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     case Builtin::BI__builtin_fmaxf:
     case Builtin::BI__builtin_fmaxf16:
     case Builtin::BI__builtin_fmaxl:
-    case Builtin::BI__builtin_fmaxf128:
-      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::maxnum,
-                                   Intrinsic::experimental_constrained_maxnum));
+    case Builtin::BI__builtin_fmaxf128: {
+      llvm::FastMathFlags FMF;
+      FMF.setNoSignedZeros();
+      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::maxnum,
+          Intrinsic::experimental_constrained_maxnum, &FMF));
+    }
 
     case Builtin::BIfmin:
     case Builtin::BIfminf:
@@ -2624,10 +2628,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     case Builtin::BI__builtin_fminf:
     case Builtin::BI__builtin_fminf16:
     case Builtin::BI__builtin_fminl:
-    case Builtin::BI__builtin_fminf128:
-      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::minnum,
-                                   Intrinsic::experimental_constrained_minnum));
+    case Builtin::BI__builtin_fminf128: {
+      llvm::FastMathFlags FMF;
+      FMF.setNoSignedZeros();
+      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::minnum,
+          Intrinsic::experimental_constrained_minnum, &FMF));
+    }
 
     case Builtin::BIfmaximum_num:
     case Builtin::BIfmaximum_numf:
@@ -3798,8 +3805,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
       Result = Builder.CreateBinaryIntrinsic(
           Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
           Op1, nullptr, "elt.max");
-    } else
-      Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
+    } else {
+      FastMathFlags FMF;
+      FMF.setNoSignedZeros(true);
+      Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/FMF, "elt.max");
+    }
     return RValue::get(Result);
   }
   case Builtin::BI__builtin_elementwise_min: {
@@ -3813,8 +3823,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
       Result = Builder.CreateBinaryIntrinsic(
           Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
           Op1, nullptr, "elt.min");
-    } else
-      Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
+    } else {
+      FastMathFlags FMF;
+      FMF.setNoSignedZeros(true);
+      Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/FMF, "elt.min");
+    }
     return RValue::get(Result);
   }
 
diff --git a/clang/test/CodeGen/RISCV/math-builtins.c b/clang/test/CodeGen/RISCV/math-builtins.c
index 9630d62f0f482..e4f456c8eabd7 100644
--- a/clang/test/CodeGen/RISCV/math-builtins.c
+++ b/clang/test/CodeGen/RISCV/math-builtins.c
@@ -134,22 +134,22 @@ long double truncl(long double);
 // RV32-NEXT:    [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
 // RV32-NEXT:    [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT:    [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV32-NEXT:    [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+// RV32-NEXT:    [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
 // RV32-NEXT:    [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV32-NEXT:    [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV32-NEXT:    [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+// RV32-NEXT:    [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
 // RV32-NEXT:    [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV32-NEXT:    [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV32-NEXT:    [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+// RV32-NEXT:    [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
 // RV32-NEXT:    [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT:    [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV32-NEXT:    [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+// RV32-NEXT:    [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
 // RV32-NEXT:    [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV32-NEXT:    [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV32-NEXT:    [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+// RV32-NEXT:    [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
 // RV32-NEXT:    [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV32-NEXT:    [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV32-NEXT:    [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+// RV32-NEXT:    [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
 // RV32-NEXT:    [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT:    [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT:    [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
@@ -310,22 +310,22 @@ long double truncl(long double);
 // RV64-NEXT:    [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
 // RV64-NEXT:    [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT:    [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV64-NEXT:    [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+// RV64-NEXT:    [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
 // RV64-NEXT:    [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV64-NEXT:    [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV64-NEXT:    [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+// RV64-NEXT:    [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
 // RV64-NEXT:    [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV64-NEXT:    [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV64-NEXT:    [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+// RV64-NEXT:    [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
 // RV64-NEXT:    [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT:    [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV64-NEXT:    [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+// RV64-NEXT:    [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
 // RV64-NEXT:    [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV64-NEXT:    [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV64-NEXT:    [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+// RV64-NEXT:    [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
 // RV64-NEXT:    [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV64-NEXT:    [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV64-NEXT:    [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+// RV64-NEXT:    [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
 // RV64-NEXT:    [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT:    [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT:    [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
diff --git a/clang/test/CodeGen/builtins-elementwise-math.c b/clang/test/CodeGen/builtins-elementwise-math.c
index ee8345ff51e5e..9cdb7d03681f6 100644
--- a/clang/test/CodeGen/builtins-elementwise-math.c
+++ b/clang/test/CodeGen/builtins-elementwise-math.c
@@ -347,21 +347,21 @@ void test_builtin_elementwise_max(float f1, float f2, double d1, double d2,
   // CHECK-LABEL: define void @test_builtin_elementwise_max(
   // CHECK:      [[F1:%.+]] = load float, ptr %f1.addr, align 4
   // CHECK-NEXT: [[F2:%.+]] = load float, ptr %f2.addr, align 4
-  // CHECK-NEXT:  call float @llvm.maxnum.f32(float [[F1]], float [[F2]])
+  // CHECK-NEXT:  call nsz float @llvm.maxnum.f32(float [[F1]], float [[F2]])
   f1 = __builtin_elementwise_max(f1, f2);
 
   // CHECK:      [[D1:%.+]] = load double, ptr %d1.addr, align 8
   // CHECK-NEXT: [[D2:%.+]] = load double, ptr %d2.addr, align 8
-  // CHECK-NEXT: call double @llvm.maxnum.f64(double [[D1]], double [[D2]])
+  // CHECK-NEXT: call nsz double @llvm.maxnum.f64(double [[D1]], double [[D2]])
   d1 = __builtin_elementwise_max(d1, d2);
 
   // CHECK:      [[D2:%.+]] = load double, ptr %d2.addr, align 8
-  // CHECK-NEXT: call double @llvm.maxnum.f64(double 2.000000e+01, double [[D2]])
+  // CHECK-NEXT: call nsz double @llvm.maxnum.f64(double 2.000000e+01, double [[D2]])
   d1 = __builtin_elementwise_max(20.0, d2);
 
   // CHECK:      [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
   // CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-  // CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
+  // CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
   vf1 = __builtin_elementwise_max(vf1, vf2);
 
   // CHECK:      [[I1:%.+]] = load i64, ptr %i1.addr, align 8
@@ -404,13 +404,13 @@ void test_builtin_elementwise_max(float f1, float f2, double d1, double d2,
 
   // CHECK:      [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
   // CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-  // CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
+  // CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
   const float4 cvf1 = vf1;
   vf1 = __builtin_elementwise_max(cvf1, vf2);
 
   // CHECK:      [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
   // CHECK-NEXT: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
-  // CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
+  // CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
   vf1 = __builtin_elementwise_max(vf2, cvf1);
 
   // CHECK:      [[IAS1:%.+]] = load i32, ptr addrspace(1) @int_as_one, align 4
@@ -431,21 +431,21 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,
   // CHECK-LABEL: define void @test_builtin_elementwise_min(
   // CHECK:      [[F1:%.+]] = load float, ptr %f1.addr, align 4
   // CHECK-NEXT: [[F2:%.+]] = load float, ptr %f2.addr, align 4
-  // CHECK-NEXT:  call float @llvm.minnum.f32(float [[F1]], float [[F2]])
+  // CHECK-NEXT:  call nsz float @llvm.minnum.f32(float [[F1]], float [[F2]])
   f1 = __builtin_elementwise_min(f1, f2);
 
   // CHECK:      [[D1:%.+]] = load double, ptr %d1.addr, align 8
   // CHECK-NEXT: [[D2:%.+]] = load double, ptr %d2.addr, align 8
-  // CHECK-NEXT: call double @llvm.minnum.f64(double [[D1]], double [[D2]])
+  // CHECK-NEXT: call nsz double @llvm.minnum.f64(double [[D1]], double [[D2]])
   d1 = __builtin_elementwise_min(d1, d2);
 
   // CHECK:      [[D1:%.+]] = load double, ptr %d1.addr, align 8
-  // CHECK-NEXT: call double @llvm.minnum.f64(double [[D1]], double 2.000000e+00)
+  // CHECK-NEXT: call nsz double @llvm.minnum.f64(double [[D1]], double 2.000000e+00)
   d1 = __builtin_elementwise_min(d1, 2.0);
 
   // CHECK:      [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
   // CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-  // CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
+  // CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
   vf1 = __builtin_elementwise_min(vf1, vf2);
 
   // CHECK:      [[I1:%.+]] = load i64, ptr %i1.addr, align 8
@@ -495,13 +495,13 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,
 
   // CHECK:      [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
   // CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-  // CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
+  // CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
   const float4 cvf1 = vf1;
   vf1 = __builtin_elementwise_min(cvf1, vf2);
 
   // CHECK:      [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
   // CHECK-NEXT: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
-  // CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
+  // CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
   vf1 = __builtin_elementwise_min(vf2, cvf1);
 
   // CHECK:      [[IAS1:%.+]] = load i32, ptr addrspace(1) @int_as_one, align 4
diff --git a/clang/test/CodeGen/builtins.c b/clang/test/CodeGen/builtins.c
index eda6c67fdad00..72e195e82d424 100644
--- a/clang/test/CodeGen/builtins.c
+++ b/clang/test/CodeGen/builtins.c
@@ -343,22 +343,22 @@ void test_float_builtin_ops(float F, double D, long double LD, int I) {
   // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80
 
   resf = __builtin_fminf(F, F);
-  // CHECK: call float @llvm.minnum.f32
+  // CHECK: call nsz float @llvm.minnum.f32
 
   resd = __builtin_fmin(D, D);
-  // CHECK: call double @llvm.minnum.f64
+  // CHECK: call nsz double @llvm.minnum.f64
 
   resld = __builtin_fminl(LD, LD);
-  // CHECK: call x86_fp80 @llvm.minnum.f80
+  // CHECK: call nsz x86_fp80 @llvm.minnum.f80
 
   resf = __builtin_fmaxf(F, F);
-  // CHECK: call float @llvm.maxnum.f32
+  // CHECK: call nsz float @llvm.maxnum.f32
 
   resd = __builtin_fmax(D, D);
-  // CHECK: call double @llvm.maxnum.f64
+  // CHECK: call nsz double @llvm.maxnum.f64
 
   resld = __builtin_fmaxl(LD, LD);
-  // CHECK: call x86_fp80 @llvm.maxnum.f80
+  // CHECK: call nsz x86_fp80 @llvm.maxnum.f80
 
   resf = __builtin_fminimum_numf(F, F);
   // CHECK: call float @llvm.minimumnum.f32
diff --git a/clang/test/CodeGen/constrained-math-builtins.c b/clang/test/CodeGen/constrained-math-builtins.c
index 68b9e75283c54..ae07cce05b307 100644
--- a/clang/test/CodeGen/constrained-math-builtins.c
+++ b/clang/test/CodeGen/constrained-math-builtins.c
@@ -123,17 +123,17 @@ __builtin_atan2(f,f);        __builtin_atan2f(f,f);       __builtin_atan2l(f,f);
 
   __builtin_fmax(f,f);       __builtin_fmaxf(f,f);      __builtin_fmaxl(f,f); __builtin_fmaxf128(f,f);
 
-// CHECK: call double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
 
   __builtin_fmin(f,f);       __builtin_fminf(f,f);      __builtin_fminl(f,f); __builtin_fminf128(f,f);
 
-// CHECK: call double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
 
   __builtin_llrint(f);     __builtin_llrintf(f);    __builtin_llrintl(f); __builtin_llrintf128(f);
 
diff --git a/clang/test/CodeGen/fmaxnum_fminnum_use_nsz.c b/clang/test/CodeGen/fmaxnum_fminnum_use_nsz.c
new file mode 100644
index 0000000000000..5d5f942aac5c5
--- /dev/null
+++ b/clang/test/CodeGen/fmaxnum_fminnum_use_nsz.c
@@ -0,0 +1,1129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -disable-llvm-passes -O3 -triple x86_64 %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK
+// RUN: %clang_cc1 -disable-llvm-passes -fno-fast-math -O3 -triple x86_64 %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK
+//
+// RUN: %clang_cc1 -disable-llvm-passes -fsigned-zeros -O3 -triple x86_64 %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK
+// RUN: %clang_cc1 -disable-llvm-passes -menable-no-nans -O3 -triple x86_64 %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-NO-NANS
+// RUN: %clang_cc1 -disable-llvm-passes -menable-no-infs -O3 -triple x86_64 %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-NO-INFS
+/// FIXME: -ffinite-math-only
+// RUN: %clang_cc1 -disable-llvm-passes -ffast-math -O3 -triple x86_64 %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-FAST
+//
+// RUN: %clang_cc1 -disable-llvm-passes -O3 -ffp-exception-behavior=strict -DENSTRICT=1 -triple x86_64 %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-STRICT
+
+float fminf (float, float);
+double fmin (double, double);
+long double fminl (long double, long double);
+float fmaxf (float, float);
+double fmax (double, double);
+long double fmaxl (long double, long double);
+
+typedef float float4 __attribute__((ext_vector_type(4)));
+typedef double double2 __attribute__((ext_vector_type(2)));
+
+// CHECK-LABEL: define dso_local float @fmin32(
+// CHECK-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NEXT:    ret float [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) float @fmin32(
+// CHECK-NO-NANS-SAME: float noundef nofpclass(nan) [[A:%.*]], float noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-NANS-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NO-NANS-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NO-NANS-NEXT:    ret float [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) float @fmin32(
+// CHECK-NO-INFS-SAME: float noundef nofpclass(inf) [[A:%.*]], float noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-INFS-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NO-INFS-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NO-INFS-NEXT:    ret float [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) float @fmin32(
+// CHECK-FAST-SAME: float noundef nofpclass(nan inf) [[A:%.*]], float noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-FAST-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-FAST-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-FAST-NEXT:    ret float [[TMP2]]
+//
+// CHECK-STRICT-LABEL: define dso_local float @fmin32(
+// CHECK-STRICT-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-STRICT-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-STRICT-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz float @llvm.experimental.constrained.minnum.f32(float [[TMP0]], float [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2:[0-9]+]]
+// CHECK-STRICT-NEXT:    ret float [[TMP2]]
+//
+float fmin32(float a, float b) {
+        return fminf(a, b);
+}
+// CHECK-LABEL: define dso_local float @fmin32b(
+// CHECK-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NEXT:    ret float [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) float @fmin32b(
+// CHECK-NO-NANS-SAME: float noundef nofpclass(nan) [[A:%.*]], float noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-NANS-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NO-NANS-NEXT:    ret float [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) float @fmin32b(
+// CHECK-NO-INFS-SAME: float noundef nofpclass(inf) [[A:%.*]], float noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-INFS-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NO-INFS-NEXT:    ret float [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) float @fmin32b(
+// CHECK-FAST-SAME: float noundef nofpclass(nan inf) [[A:%.*]], float noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-FAST-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-FAST-NEXT:    ret float [[TMP2]]
+//
+// CHECK-STRICT-LABEL: define dso_local float @fmin32b(
+// CHECK-STRICT-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-STRICT-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz float @llvm.experimental.constrained.minnum.f32(float [[TMP0]], float [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret float [[TMP2]]
+//
+float fmin32b(float a, float b) {
+        return __builtin_fminf(a, b);
+}
+#if !defined(ENSTRICT)
+// CHECK-LABEL: define dso_local <4 x float> @pfmin32(
+// CHECK-SAME: <4 x float> noundef [[A:%.*]], <4 x float> noundef [[B:%.*]], <4 x float> noundef [[C:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[C_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    store <4 x float> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NEXT:    store <4 x float> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <4 x float> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[ELT_MIN:%.*]] = call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
+// CHECK-NEXT:    store <4 x float> [[ELT_MIN]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret <4 x float> [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) <4 x float> @pfmin32(
+// CHECK-NO-NANS-SAME: <4 x float> noundef nofpclass(nan) [[A:%.*]], <4 x float> noundef nofpclass(nan) [[B:%.*]], <4 x float> noundef nofpclass(nan) [[C:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-NANS-NEXT:    [[C_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-NANS-NEXT:    store <4 x float> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NO-NANS-NEXT:    store <4 x float> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    store <4 x float> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[ELT_MIN:%.*]] = call nnan nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
+// CHECK-NO-NANS-NEXT:    store <4 x float> [[ELT_MIN]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    ret <4 x float> [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) <4 x float> @pfmin32(
+// CHECK-NO-INFS-SAME: <4 x float> noundef nofpclass(inf) [[A:%.*]], <4 x float> noundef nofpclass(inf) [[B:%.*]], <4 x float> noundef nofpclass(inf) [[C:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-INFS-NEXT:    [[C_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-INFS-NEXT:    store <4 x float> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NO-INFS-NEXT:    store <4 x float> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    store <4 x float> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[ELT_MIN:%.*]] = call ninf nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
+// CHECK-NO-INFS-NEXT:    store <4 x float> [[ELT_MIN]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    ret <4 x float> [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) <4 x float> @pfmin32(
+// CHECK-FAST-SAME: <4 x float> noundef nofpclass(nan inf) [[A:%.*]], <4 x float> noundef nofpclass(nan inf) [[B:%.*]], <4 x float> noundef nofpclass(nan inf) [[C:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-FAST-NEXT:    [[C_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-FAST-NEXT:    store <4 x float> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-FAST-NEXT:    store <4 x float> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    store <4 x float> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[ELT_MIN:%.*]] = call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
+// CHECK-FAST-NEXT:    store <4 x float> [[ELT_MIN]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    ret <4 x float> [[TMP2]]
+//
+float4 pfmin32(float4 a, float4 b, float4 c) {
+	c = __builtin_elementwise_min(a, b);
+	return c;
+}
+#endif
+// CHECK-LABEL: define dso_local float @fmin64(
+// CHECK-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7:![0-9]+]]
+// CHECK-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NEXT:    [[CONV:%.*]] = fptrunc double [[TMP2]] to float
+// CHECK-NEXT:    ret float [[CONV]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) float @fmin64(
+// CHECK-NO-NANS-SAME: double noundef nofpclass(nan) [[A:%.*]], double noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-NANS-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7:![0-9]+]]
+// CHECK-NO-NANS-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz double @llvm.minnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NO-NANS-NEXT:    [[CONV:%.*]] = fptrunc nnan double [[TMP2]] to float
+// CHECK-NO-NANS-NEXT:    ret float [[CONV]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) float @fmin64(
+// CHECK-NO-INFS-SAME: double noundef nofpclass(inf) [[A:%.*]], double noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-INFS-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7:![0-9]+]]
+// CHECK-NO-INFS-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz double @llvm.minnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NO-INFS-NEXT:    [[CONV:%.*]] = fptrunc ninf double [[TMP2]] to float
+// CHECK-NO-INFS-NEXT:    ret float [[CONV]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) float @fmin64(
+// CHECK-FAST-SAME: double noundef nofpclass(nan inf) [[A:%.*]], double noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-FAST-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7:![0-9]+]]
+// CHECK-FAST-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn double @llvm.minnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-FAST-NEXT:    [[CONV:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn double [[TMP2]] to float
+// CHECK-FAST-NEXT:    ret float [[CONV]]
+//
+// CHECK-STRICT-LABEL: define dso_local float @fmin64(
+// CHECK-STRICT-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-STRICT-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-STRICT-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz double @llvm.experimental.constrained.minnum.f64(double [[TMP0]], double [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    [[CONV:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret float [[CONV]]
+//
+float fmin64(double a, double b) {
+        return fmin(a, b);
+}
+// CHECK-LABEL: define dso_local float @fmin64b(
+// CHECK-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NEXT:    [[CONV:%.*]] = fptrunc double [[TMP2]] to float
+// CHECK-NEXT:    ret float [[CONV]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) float @fmin64b(
+// CHECK-NO-NANS-SAME: double noundef nofpclass(nan) [[A:%.*]], double noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-NANS-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz double @llvm.minnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NO-NANS-NEXT:    [[CONV:%.*]] = fptrunc nnan double [[TMP2]] to float
+// CHECK-NO-NANS-NEXT:    ret float [[CONV]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) float @fmin64b(
+// CHECK-NO-INFS-SAME: double noundef nofpclass(inf) [[A:%.*]], double noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-INFS-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz double @llvm.minnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NO-INFS-NEXT:    [[CONV:%.*]] = fptrunc ninf double [[TMP2]] to float
+// CHECK-NO-INFS-NEXT:    ret float [[CONV]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) float @fmin64b(
+// CHECK-FAST-SAME: double noundef nofpclass(nan inf) [[A:%.*]], double noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-FAST-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn double @llvm.minnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-FAST-NEXT:    [[CONV:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn double [[TMP2]] to float
+// CHECK-FAST-NEXT:    ret float [[CONV]]
+//
+// CHECK-STRICT-LABEL: define dso_local float @fmin64b(
+// CHECK-STRICT-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-STRICT-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz double @llvm.experimental.constrained.minnum.f64(double [[TMP0]], double [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    [[CONV:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret float [[CONV]]
+//
+float fmin64b(double a, double b) {
+        return __builtin_fmin(a, b);
+}
+#if !defined(ENSTRICT)
+// CHECK-LABEL: define dso_local <2 x double> @pfmin64(
+// CHECK-SAME: <2 x double> noundef [[A:%.*]], <2 x double> noundef [[B:%.*]], <2 x double> noundef [[C:%.*]]) #[[ATTR2]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NEXT:    [[C_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NEXT:    store <2 x double> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <2 x double> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <2 x double> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[ELT_MIN:%.*]] = call nsz <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
+// CHECK-NEXT:    store <2 x double> [[ELT_MIN]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret <2 x double> [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) <2 x double> @pfmin64(
+// CHECK-NO-NANS-SAME: <2 x double> noundef nofpclass(nan) [[A:%.*]], <2 x double> noundef nofpclass(nan) [[B:%.*]], <2 x double> noundef nofpclass(nan) [[C:%.*]]) #[[ATTR2]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-NANS-NEXT:    [[C_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-NANS-NEXT:    store <2 x double> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    store <2 x double> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    store <2 x double> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[ELT_MIN:%.*]] = call nnan nsz <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
+// CHECK-NO-NANS-NEXT:    store <2 x double> [[ELT_MIN]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    ret <2 x double> [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) <2 x double> @pfmin64(
+// CHECK-NO-INFS-SAME: <2 x double> noundef nofpclass(inf) [[A:%.*]], <2 x double> noundef nofpclass(inf) [[B:%.*]], <2 x double> noundef nofpclass(inf) [[C:%.*]]) #[[ATTR2]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-INFS-NEXT:    [[C_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-INFS-NEXT:    store <2 x double> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    store <2 x double> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    store <2 x double> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[ELT_MIN:%.*]] = call ninf nsz <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
+// CHECK-NO-INFS-NEXT:    store <2 x double> [[ELT_MIN]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    ret <2 x double> [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) <2 x double> @pfmin64(
+// CHECK-FAST-SAME: <2 x double> noundef nofpclass(nan inf) [[A:%.*]], <2 x double> noundef nofpclass(nan inf) [[B:%.*]], <2 x double> noundef nofpclass(nan inf) [[C:%.*]]) #[[ATTR2]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-FAST-NEXT:    [[C_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-FAST-NEXT:    store <2 x double> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    store <2 x double> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    store <2 x double> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[ELT_MIN:%.*]] = call reassoc nnan ninf nsz arcp afn <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
+// CHECK-FAST-NEXT:    store <2 x double> [[ELT_MIN]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    ret <2 x double> [[TMP2]]
+//
+double2 pfmin64(double2 a, double2 b, double2 c) {
+	c = __builtin_elementwise_min(a, b);
+	return c;
+}
+#endif
+// CHECK-LABEL: define dso_local x86_fp80 @fmin80(
+// CHECK-SAME: x86_fp80 noundef [[A:%.*]], x86_fp80 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz x86_fp80 @llvm.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) x86_fp80 @fmin80(
+// CHECK-NO-NANS-SAME: x86_fp80 noundef nofpclass(nan) [[A:%.*]], x86_fp80 noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-NANS-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-NO-NANS-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz x86_fp80 @llvm.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NO-NANS-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) x86_fp80 @fmin80(
+// CHECK-NO-INFS-SAME: x86_fp80 noundef nofpclass(inf) [[A:%.*]], x86_fp80 noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-INFS-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-NO-INFS-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz x86_fp80 @llvm.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NO-INFS-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) x86_fp80 @fmin80(
+// CHECK-FAST-SAME: x86_fp80 noundef nofpclass(nan inf) [[A:%.*]], x86_fp80 noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-FAST-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-FAST-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-FAST-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-STRICT-LABEL: define dso_local x86_fp80 @fmin80(
+// CHECK-STRICT-SAME: x86_fp80 noundef [[A:%.*]], x86_fp80 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-STRICT-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA8:![0-9]+]]
+// CHECK-STRICT-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret x86_fp80 [[TMP2]]
+//
+long double fmin80(long double a, long double b) {
+        return fminl(a, b);
+}
+// CHECK-LABEL: define dso_local x86_fp80 @fmin80b(
+// CHECK-SAME: x86_fp80 noundef [[A:%.*]], x86_fp80 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz x86_fp80 @llvm.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) x86_fp80 @fmin80b(
+// CHECK-NO-NANS-SAME: x86_fp80 noundef nofpclass(nan) [[A:%.*]], x86_fp80 noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-NANS-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz x86_fp80 @llvm.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NO-NANS-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) x86_fp80 @fmin80b(
+// CHECK-NO-INFS-SAME: x86_fp80 noundef nofpclass(inf) [[A:%.*]], x86_fp80 noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-INFS-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz x86_fp80 @llvm.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NO-INFS-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) x86_fp80 @fmin80b(
+// CHECK-FAST-SAME: x86_fp80 noundef nofpclass(nan inf) [[A:%.*]], x86_fp80 noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-FAST-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-FAST-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-STRICT-LABEL: define dso_local x86_fp80 @fmin80b(
+// CHECK-STRICT-SAME: x86_fp80 noundef [[A:%.*]], x86_fp80 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-STRICT-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret x86_fp80 [[TMP2]]
+//
+long double fmin80b(long double a, long double b) {
+        return __builtin_fminl(a, b);
+}
+// CHECK-LABEL: define dso_local float @fmax32(
+// CHECK-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NEXT:    ret float [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) float @fmax32(
+// CHECK-NO-NANS-SAME: float noundef nofpclass(nan) [[A:%.*]], float noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-NANS-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NO-NANS-NEXT:    ret float [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) float @fmax32(
+// CHECK-NO-INFS-SAME: float noundef nofpclass(inf) [[A:%.*]], float noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-INFS-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NO-INFS-NEXT:    ret float [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) float @fmax32(
+// CHECK-FAST-SAME: float noundef nofpclass(nan inf) [[A:%.*]], float noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-FAST-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-FAST-NEXT:    ret float [[TMP2]]
+//
+// CHECK-STRICT-LABEL: define dso_local float @fmax32(
+// CHECK-STRICT-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-STRICT-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz float @llvm.experimental.constrained.maxnum.f32(float [[TMP0]], float [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret float [[TMP2]]
+//
+float fmax32(float a, float b) { // libm fmaxf: expect @llvm.maxnum.f32 carrying nsz, plus the mode-dependent flags (nnan/ninf/fast) per the CHECK prefixes above
+        return fmaxf(a, b); // under -ffp-exception-behavior=strict this becomes the constrained.maxnum intrinsic instead
+}
+// CHECK-LABEL: define dso_local float @fmax32b(
+// CHECK-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NEXT:    ret float [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) float @fmax32b(
+// CHECK-NO-NANS-SAME: float noundef nofpclass(nan) [[A:%.*]], float noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-NANS-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NO-NANS-NEXT:    ret float [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) float @fmax32b(
+// CHECK-NO-INFS-SAME: float noundef nofpclass(inf) [[A:%.*]], float noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NO-INFS-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NO-INFS-NEXT:    ret float [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) float @fmax32b(
+// CHECK-FAST-SAME: float noundef nofpclass(nan inf) [[A:%.*]], float noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-FAST-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-FAST-NEXT:    ret float [[TMP2]]
+//
+// CHECK-STRICT-LABEL: define dso_local float @fmax32b(
+// CHECK-STRICT-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-STRICT-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4, !tbaa [[TBAA2]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz float @llvm.experimental.constrained.maxnum.f32(float [[TMP0]], float [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret float [[TMP2]]
+//
+float fmax32b(float a, float b) { // builtin spelling: __builtin_fmaxf lowers identically to the libm call — @llvm.maxnum.f32 with nsz
+        return __builtin_fmaxf(a, b);
+}
+#if !defined(ENSTRICT)
+// CHECK-LABEL: define dso_local <4 x float> @pfmax32(
+// CHECK-SAME: <4 x float> noundef [[A:%.*]], <4 x float> noundef [[B:%.*]], <4 x float> noundef [[C:%.*]]) #[[ATTR2]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[C_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    store <4 x float> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <4 x float> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <4 x float> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[ELT_MAX:%.*]] = call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
+// CHECK-NEXT:    store <4 x float> [[ELT_MAX]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret <4 x float> [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) <4 x float> @pfmax32(
+// CHECK-NO-NANS-SAME: <4 x float> noundef nofpclass(nan) [[A:%.*]], <4 x float> noundef nofpclass(nan) [[B:%.*]], <4 x float> noundef nofpclass(nan) [[C:%.*]]) #[[ATTR2]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-NANS-NEXT:    [[C_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-NANS-NEXT:    store <4 x float> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    store <4 x float> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    store <4 x float> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[ELT_MAX:%.*]] = call nnan nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
+// CHECK-NO-NANS-NEXT:    store <4 x float> [[ELT_MAX]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    ret <4 x float> [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) <4 x float> @pfmax32(
+// CHECK-NO-INFS-SAME: <4 x float> noundef nofpclass(inf) [[A:%.*]], <4 x float> noundef nofpclass(inf) [[B:%.*]], <4 x float> noundef nofpclass(inf) [[C:%.*]]) #[[ATTR2]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-INFS-NEXT:    [[C_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NO-INFS-NEXT:    store <4 x float> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    store <4 x float> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    store <4 x float> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[ELT_MAX:%.*]] = call ninf nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
+// CHECK-NO-INFS-NEXT:    store <4 x float> [[ELT_MAX]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    ret <4 x float> [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) <4 x float> @pfmax32(
+// CHECK-FAST-SAME: <4 x float> noundef nofpclass(nan inf) [[A:%.*]], <4 x float> noundef nofpclass(nan inf) [[B:%.*]], <4 x float> noundef nofpclass(nan inf) [[C:%.*]]) #[[ATTR2]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-FAST-NEXT:    [[C_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-FAST-NEXT:    store <4 x float> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    store <4 x float> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    store <4 x float> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[ELT_MAX:%.*]] = call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP0]], <4 x float> [[TMP1]])
+// CHECK-FAST-NEXT:    store <4 x float> [[ELT_MAX]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    ret <4 x float> [[TMP2]]
+//
+float4 pfmax32(float4 a, float4 b, float4 c) { // vector form; compiled only when ENSTRICT is not defined (see the enclosing #if)
+	c = __builtin_elementwise_max(a, b); // expect @llvm.maxnum.v4f32 with nsz, plus the mode-dependent fast-math flags
+	return c;
+}
+#endif
+// CHECK-LABEL: define dso_local float @fmax64(
+// CHECK-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NEXT:    [[CONV:%.*]] = fptrunc double [[TMP2]] to float
+// CHECK-NEXT:    ret float [[CONV]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) float @fmax64(
+// CHECK-NO-NANS-SAME: double noundef nofpclass(nan) [[A:%.*]], double noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-NANS-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz double @llvm.maxnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NO-NANS-NEXT:    [[CONV:%.*]] = fptrunc nnan double [[TMP2]] to float
+// CHECK-NO-NANS-NEXT:    ret float [[CONV]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) float @fmax64(
+// CHECK-NO-INFS-SAME: double noundef nofpclass(inf) [[A:%.*]], double noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-INFS-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz double @llvm.maxnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NO-INFS-NEXT:    [[CONV:%.*]] = fptrunc ninf double [[TMP2]] to float
+// CHECK-NO-INFS-NEXT:    ret float [[CONV]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) float @fmax64(
+// CHECK-FAST-SAME: double noundef nofpclass(nan inf) [[A:%.*]], double noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-FAST-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn double @llvm.maxnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-FAST-NEXT:    [[CONV:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn double [[TMP2]] to float
+// CHECK-FAST-NEXT:    ret float [[CONV]]
+//
+// CHECK-STRICT-LABEL: define dso_local float @fmax64(
+// CHECK-STRICT-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-STRICT-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz double @llvm.experimental.constrained.maxnum.f64(double [[TMP0]], double [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    [[CONV:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret float [[CONV]]
+//
+float fmax64(double a, double b) { // intentionally returns float: also exercises the fptrunc (constrained.fptrunc under STRICT) of the double maxnum result
+        return fmax(a, b); // expect @llvm.maxnum.f64 with nsz, plus mode-dependent flags per the CHECK prefixes above
+}
+// CHECK-LABEL: define dso_local float @fmax64b(
+// CHECK-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NEXT:    [[CONV:%.*]] = fptrunc double [[TMP2]] to float
+// CHECK-NEXT:    ret float [[CONV]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) float @fmax64b(
+// CHECK-NO-NANS-SAME: double noundef nofpclass(nan) [[A:%.*]], double noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-NANS-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz double @llvm.maxnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NO-NANS-NEXT:    [[CONV:%.*]] = fptrunc nnan double [[TMP2]] to float
+// CHECK-NO-NANS-NEXT:    ret float [[CONV]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) float @fmax64b(
+// CHECK-NO-INFS-SAME: double noundef nofpclass(inf) [[A:%.*]], double noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NO-INFS-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz double @llvm.maxnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-NO-INFS-NEXT:    [[CONV:%.*]] = fptrunc ninf double [[TMP2]] to float
+// CHECK-NO-INFS-NEXT:    ret float [[CONV]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) float @fmax64b(
+// CHECK-FAST-SAME: double noundef nofpclass(nan inf) [[A:%.*]], double noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-FAST-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA7]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn double @llvm.maxnum.f64(double [[TMP0]], double [[TMP1]])
+// CHECK-FAST-NEXT:    [[CONV:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn double [[TMP2]] to float
+// CHECK-FAST-NEXT:    ret float [[CONV]]
+//
+// CHECK-STRICT-LABEL: define dso_local float @fmax64b(
+// CHECK-STRICT-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-STRICT-NEXT:    store double [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load double, ptr [[B_ADDR]], align 8, !tbaa [[TBAA6]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz double @llvm.experimental.constrained.maxnum.f64(double [[TMP0]], double [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    [[CONV:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret float [[CONV]]
+//
+float fmax64b(double a, double b) { // builtin spelling of fmax64: same @llvm.maxnum.f64 + nsz lowering, same narrowing fptrunc on return
+        return __builtin_fmax(a, b);
+}
+#if !defined(ENSTRICT)
+// CHECK-LABEL: define dso_local <2 x double> @pfmax64(
+// CHECK-SAME: <2 x double> noundef [[A:%.*]], <2 x double> noundef [[B:%.*]], <2 x double> noundef [[C:%.*]]) #[[ATTR2]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NEXT:    [[C_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NEXT:    store <2 x double> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <2 x double> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <2 x double> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[ELT_MAX:%.*]] = call nsz <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
+// CHECK-NEXT:    store <2 x double> [[ELT_MAX]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret <2 x double> [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) <2 x double> @pfmax64(
+// CHECK-NO-NANS-SAME: <2 x double> noundef nofpclass(nan) [[A:%.*]], <2 x double> noundef nofpclass(nan) [[B:%.*]], <2 x double> noundef nofpclass(nan) [[C:%.*]]) #[[ATTR2]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-NANS-NEXT:    [[C_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-NANS-NEXT:    store <2 x double> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    store <2 x double> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    store <2 x double> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[ELT_MAX:%.*]] = call nnan nsz <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
+// CHECK-NO-NANS-NEXT:    store <2 x double> [[ELT_MAX]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-NANS-NEXT:    ret <2 x double> [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) <2 x double> @pfmax64(
+// CHECK-NO-INFS-SAME: <2 x double> noundef nofpclass(inf) [[A:%.*]], <2 x double> noundef nofpclass(inf) [[B:%.*]], <2 x double> noundef nofpclass(inf) [[C:%.*]]) #[[ATTR2]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-INFS-NEXT:    [[C_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-NO-INFS-NEXT:    store <2 x double> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    store <2 x double> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    store <2 x double> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[ELT_MAX:%.*]] = call ninf nsz <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
+// CHECK-NO-INFS-NEXT:    store <2 x double> [[ELT_MAX]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NO-INFS-NEXT:    ret <2 x double> [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) <2 x double> @pfmax64(
+// CHECK-FAST-SAME: <2 x double> noundef nofpclass(nan inf) [[A:%.*]], <2 x double> noundef nofpclass(nan inf) [[B:%.*]], <2 x double> noundef nofpclass(nan inf) [[C:%.*]]) #[[ATTR2]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-FAST-NEXT:    [[C_ADDR:%.*]] = alloca <2 x double>, align 16
+// CHECK-FAST-NEXT:    store <2 x double> [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    store <2 x double> [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    store <2 x double> [[C]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[B_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[ELT_MAX:%.*]] = call reassoc nnan ninf nsz arcp afn <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP0]], <2 x double> [[TMP1]])
+// CHECK-FAST-NEXT:    store <2 x double> [[ELT_MAX]], ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[C_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-FAST-NEXT:    ret <2 x double> [[TMP2]]
+//
+double2 pfmax64(double2 a, double2 b, double2 c) { // vector double form; compiled only when ENSTRICT is not defined (see the enclosing #if)
+	c = __builtin_elementwise_max(a, b); // expect @llvm.maxnum.v2f64 with nsz, plus the mode-dependent fast-math flags
+	return c;
+}
+#endif
+// CHECK-LABEL: define dso_local x86_fp80 @fmax80(
+// CHECK-SAME: x86_fp80 noundef [[A:%.*]], x86_fp80 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz x86_fp80 @llvm.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) x86_fp80 @fmax80(
+// CHECK-NO-NANS-SAME: x86_fp80 noundef nofpclass(nan) [[A:%.*]], x86_fp80 noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-NANS-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz x86_fp80 @llvm.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NO-NANS-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) x86_fp80 @fmax80(
+// CHECK-NO-INFS-SAME: x86_fp80 noundef nofpclass(inf) [[A:%.*]], x86_fp80 noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-INFS-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz x86_fp80 @llvm.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NO-INFS-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) x86_fp80 @fmax80(
+// CHECK-FAST-SAME: x86_fp80 noundef nofpclass(nan inf) [[A:%.*]], x86_fp80 noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-FAST-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-FAST-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-STRICT-LABEL: define dso_local x86_fp80 @fmax80(
+// CHECK-STRICT-SAME: x86_fp80 noundef [[A:%.*]], x86_fp80 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-STRICT-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret x86_fp80 [[TMP2]]
+//
+long double fmax80(long double a, long double b) { // x86 long double: expect @llvm.maxnum.f80 with nsz (constrained.maxnum.f80 under STRICT)
+        return fmaxl(a, b);
+}
+// CHECK-LABEL: define dso_local x86_fp80 @fmax80b(
+// CHECK-SAME: x86_fp80 noundef [[A:%.*]], x86_fp80 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz x86_fp80 @llvm.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-NO-NANS-LABEL: define dso_local nofpclass(nan) x86_fp80 @fmax80b(
+// CHECK-NO-NANS-SAME: x86_fp80 noundef nofpclass(nan) [[A:%.*]], x86_fp80 noundef nofpclass(nan) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-NANS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-NANS-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-NANS-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-NANS-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-NANS-NEXT:    [[TMP2:%.*]] = call nnan nsz x86_fp80 @llvm.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NO-NANS-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-NO-INFS-LABEL: define dso_local nofpclass(inf) x86_fp80 @fmax80b(
+// CHECK-NO-INFS-SAME: x86_fp80 noundef nofpclass(inf) [[A:%.*]], x86_fp80 noundef nofpclass(inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NO-INFS-NEXT:  [[ENTRY:.*:]]
+// CHECK-NO-INFS-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-INFS-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-NO-INFS-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-NO-INFS-NEXT:    [[TMP2:%.*]] = call ninf nsz x86_fp80 @llvm.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-NO-INFS-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-FAST-LABEL: define dso_local nofpclass(nan inf) x86_fp80 @fmax80b(
+// CHECK-FAST-SAME: x86_fp80 noundef nofpclass(nan inf) [[A:%.*]], x86_fp80 noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
+// CHECK-FAST-NEXT:  [[ENTRY:.*:]]
+// CHECK-FAST-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-FAST-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-FAST-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA9]]
+// CHECK-FAST-NEXT:    [[TMP2:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]])
+// CHECK-FAST-NEXT:    ret x86_fp80 [[TMP2]]
+//
+// CHECK-STRICT-LABEL: define dso_local x86_fp80 @fmax80b(
+// CHECK-STRICT-SAME: x86_fp80 noundef [[A:%.*]], x86_fp80 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-STRICT-NEXT:  [[ENTRY:.*:]]
+// CHECK-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca x86_fp80, align 16
+// CHECK-STRICT-NEXT:    store x86_fp80 [[A]], ptr [[A_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    store x86_fp80 [[B]], ptr [[B_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[A_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP1:%.*]] = load x86_fp80, ptr [[B_ADDR]], align 16, !tbaa [[TBAA8]]
+// CHECK-STRICT-NEXT:    [[TMP2:%.*]] = call nsz x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"fpexcept.strict") #[[ATTR2]]
+// CHECK-STRICT-NEXT:    ret x86_fp80 [[TMP2]]
+//
+long double fmax80b(long double a, long double b) {
+        return __builtin_fmaxl(a, b);
+}
+
+//.
+// CHECK: [[TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0}
+// CHECK: [[META3]] = !{!"float", [[META4:![0-9]+]], i64 0}
+// CHECK: [[META4]] = !{!"omnipotent char", [[META5:![0-9]+]], i64 0}
+// CHECK: [[META5]] = !{!"Simple C/C++ TBAA"}
+// CHECK: [[TBAA6]] = !{[[META4]], [[META4]], i64 0}
+// CHECK: [[TBAA7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+// CHECK: [[META8]] = !{!"double", [[META4]], i64 0}
+// CHECK: [[TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0}
+// CHECK: [[META10]] = !{!"long double", [[META4]], i64 0}
+//.
+// CHECK-NO-NANS: [[TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0}
+// CHECK-NO-NANS: [[META3]] = !{!"float", [[META4:![0-9]+]], i64 0}
+// CHECK-NO-NANS: [[META4]] = !{!"omnipotent char", [[META5:![0-9]+]], i64 0}
+// CHECK-NO-NANS: [[META5]] = !{!"Simple C/C++ TBAA"}
+// CHECK-NO-NANS: [[TBAA6]] = !{[[META4]], [[META4]], i64 0}
+// CHECK-NO-NANS: [[TBAA7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+// CHECK-NO-NANS: [[META8]] = !{!"double", [[META4]], i64 0}
+// CHECK-NO-NANS: [[TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0}
+// CHECK-NO-NANS: [[META10]] = !{!"long double", [[META4]], i64 0}
+//.
+// CHECK-NO-INFS: [[TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0}
+// CHECK-NO-INFS: [[META3]] = !{!"float", [[META4:![0-9]+]], i64 0}
+// CHECK-NO-INFS: [[META4]] = !{!"omnipotent char", [[META5:![0-9]+]], i64 0}
+// CHECK-NO-INFS: [[META5]] = !{!"Simple C/C++ TBAA"}
+// CHECK-NO-INFS: [[TBAA6]] = !{[[META4]], [[META4]], i64 0}
+// CHECK-NO-INFS: [[TBAA7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+// CHECK-NO-INFS: [[META8]] = !{!"double", [[META4]], i64 0}
+// CHECK-NO-INFS: [[TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0}
+// CHECK-NO-INFS: [[META10]] = !{!"long double", [[META4]], i64 0}
+//.
+// CHECK-FAST: [[TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0}
+// CHECK-FAST: [[META3]] = !{!"float", [[META4:![0-9]+]], i64 0}
+// CHECK-FAST: [[META4]] = !{!"omnipotent char", [[META5:![0-9]+]], i64 0}
+// CHECK-FAST: [[META5]] = !{!"Simple C/C++ TBAA"}
+// CHECK-FAST: [[TBAA6]] = !{[[META4]], [[META4]], i64 0}
+// CHECK-FAST: [[TBAA7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+// CHECK-FAST: [[META8]] = !{!"double", [[META4]], i64 0}
+// CHECK-FAST: [[TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0}
+// CHECK-FAST: [[META10]] = !{!"long double", [[META4]], i64 0}
+//.
+// CHECK-STRICT: [[TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0}
+// CHECK-STRICT: [[META3]] = !{!"float", [[META4:![0-9]+]], i64 0}
+// CHECK-STRICT: [[META4]] = !{!"omnipotent char", [[META5:![0-9]+]], i64 0}
+// CHECK-STRICT: [[META5]] = !{!"Simple C/C++ TBAA"}
+// CHECK-STRICT: [[TBAA6]] = !{[[META7:![0-9]+]], [[META7]], i64 0}
+// CHECK-STRICT: [[META7]] = !{!"double", [[META4]], i64 0}
+// CHECK-STRICT: [[TBAA8]] = !{[[META9:![0-9]+]], [[META9]], i64 0}
+// CHECK-STRICT: [[META9]] = !{!"long double", [[META4]], i64 0}
+//.
diff --git a/clang/test/CodeGen/math-builtins-long.c b/clang/test/CodeGen/math-builtins-long.c
index 87e64a2eaa1c3..28a4e8893d5bd 100644
--- a/clang/test/CodeGen/math-builtins-long.c
+++ b/clang/test/CodeGen/math-builtins-long.c
@@ -148,16 +148,16 @@ void foo(long double f, long double *l, int *i, const char *c) {
   // PPCF128: call fp128 @llvm.floor.f128(fp128 %{{.+}})
   __builtin_floorl(f);
 
-  // F80: call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}})
-  // PPC: call ppc_fp128 @llvm.maxnum.ppcf128(ppc_fp128 %{{.+}}, ppc_fp128 %{{.+}})
-  // X86F128: call fp128 @llvm.maxnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
-  // PPCF128: call fp128 @llvm.maxnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
+  // F80: call nsz x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}})
+  // PPC: call nsz ppc_fp128 @llvm.maxnum.ppcf128(ppc_fp128 %{{.+}}, ppc_fp128 %{{.+}})
+  // X86F128: call nsz fp128 @llvm.maxnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
+  // PPCF128: call nsz fp128 @llvm.maxnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
   __builtin_fmaxl(f,f);
 
-  // F80: call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}})
-  // PPC: call ppc_fp128 @llvm.minnum.ppcf128(ppc_fp128 %{{.+}}, ppc_fp128 %{{.+}})
-  // X86F128: call fp128 @llvm.minnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
-  // PPCF128: call fp128 @llvm.minnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
+  // F80: call nsz x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}})
+  // PPC: call nsz ppc_fp128 @llvm.minnum.ppcf128(ppc_fp128 %{{.+}}, ppc_fp128 %{{.+}})
+  // X86F128: call nsz fp128 @llvm.minnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
+  // PPCF128: call nsz fp128 @llvm.minnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
   __builtin_fminl(f,f);
 
   // F80: call x86_fp80 @llvm.nearbyint.f80(x86_fp80 %{{.+}})
diff --git a/clang/test/CodeGen/strictfp-elementwise-builtins.cpp b/clang/test/CodeGen/strictfp-elementwise-builtins.cpp
index b250512efc5c7..9448f3a2668b6 100644
--- a/clang/test/CodeGen/strictfp-elementwise-builtins.cpp
+++ b/clang/test/CodeGen/strictfp-elementwise-builtins.cpp
@@ -30,7 +30,7 @@ float4 strict_elementwise_abs(float4 a) {
 // CHECK-LABEL: define dso_local noundef <4 x float> @_Z22strict_elementwise_maxDv4_fS_
 // CHECK-SAME: (<4 x float> noundef [[A:%.*]], <4 x float> noundef [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[ELT_MAX:%.*]] = tail call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> [[A]], <4 x float> [[B]], metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT:    [[ELT_MAX:%.*]] = tail call nsz <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> [[A]], <4 x float> [[B]], metadata !"fpexcept.strict") #[[ATTR4]]
 // CHECK-NEXT:    ret <4 x float> [[ELT_MAX]]
 //
 float4 strict_elementwise_max(float4 a, float4 b) {
@@ -40,7 +40,7 @@ float4 strict_elementwise_max(float4 a, float4 b) {
 // CHECK-LABEL: define dso_local noundef <4 x float> @_Z22strict_elementwise_minDv4_fS_
 // CHECK-SAME: (<4 x float> noundef [[A:%.*]], <4 x float> noundef [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[ELT_MIN:%.*]] = tail call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> [[A]], <4 x float> [[B]], metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT:    [[ELT_MIN:%.*]] = tail call nsz <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> [[A]], <4 x float> [[B]], metadata !"fpexcept.strict") #[[ATTR4]]
 // CHECK-NEXT:    ret <4 x float> [[ELT_MIN]]
 //
 float4 strict_elementwise_min(float4 a, float4 b) {
diff --git a/clang/test/CodeGenOpenCL/builtins-f16.cl b/clang/test/CodeGenOpenCL/builtins-f16.cl
index e8b62fe0830cd..f30ed0a1944ff 100644
--- a/clang/test/CodeGenOpenCL/builtins-f16.cl
+++ b/clang/test/CodeGenOpenCL/builtins-f16.cl
@@ -48,10 +48,10 @@ void test_half_builtins(half h0, half h1, half h2, int i0) {
   // CHECK: call half @llvm.fma.f16(half %h0, half %h1, half %h2)
   res = __builtin_fmaf16(h0, h1 ,h2);
 
-  // CHECK: call half @llvm.maxnum.f16(half %h0, half %h1)
+  // CHECK: call nsz half @llvm.maxnum.f16(half %h0, half %h1)
   res = __builtin_fmaxf16(h0, h1);
 
-  // CHECK: call half @llvm.minnum.f16(half %h0, half %h1)
+  // CHECK: call nsz half @llvm.minnum.f16(half %h0, half %h1)
   res = __builtin_fminf16(h0, h1);
 
   // CHECK: frem half %h0, %h1
diff --git a/clang/test/Headers/__clang_hip_math.hip b/clang/test/Headers/__clang_hip_math.hip
index df1cd716342a5..518fa5570b423 100644
--- a/clang/test/Headers/__clang_hip_math.hip
+++ b/clang/test/Headers/__clang_hip_math.hip
@@ -1701,22 +1701,22 @@ extern "C" __device__ double test_fma_rn(double x, double y, double z) {
 
 // DEFAULT-LABEL: @test_fmaxf(
 // DEFAULT-NEXT:  entry:
-// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call contract noundef float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // DEFAULT-NEXT:    ret float [[TMP0]]
 //
 // FINITEONLY-LABEL: @test_fmaxf(
 // FINITEONLY-NEXT:  entry:
-// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf contract noundef float @llvm.maxnum.f32(float nofpclass(nan inf) [[X:%.*]], float nofpclass(nan inf) [[Y:%.*]])
+// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf nsz contract noundef float @llvm.maxnum.f32(float nofpclass(nan inf) [[X:%.*]], float nofpclass(nan inf) [[Y:%.*]])
 // FINITEONLY-NEXT:    ret float [[TMP0]]
 //
 // APPROX-LABEL: @test_fmaxf(
 // APPROX-NEXT:  entry:
-// APPROX-NEXT:    [[TMP0:%.*]] = tail call contract noundef float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// APPROX-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // APPROX-NEXT:    ret float [[TMP0]]
 //
 // AMDGCNSPIRV-LABEL: @test_fmaxf(
 // AMDGCNSPIRV-NEXT:  entry:
-// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call contract noundef addrspace(4) float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef addrspace(4) float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // AMDGCNSPIRV-NEXT:    ret float [[TMP0]]
 //
 extern "C" __device__ float test_fmaxf(float x, float y) {
@@ -1725,22 +1725,22 @@ extern "C" __device__ float test_fmaxf(float x, float y) {
 
 // DEFAULT-LABEL: @test_fmax(
 // DEFAULT-NEXT:  entry:
-// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call contract noundef double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // DEFAULT-NEXT:    ret double [[TMP0]]
 //
 // FINITEONLY-LABEL: @test_fmax(
 // FINITEONLY-NEXT:  entry:
-// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf contract noundef double @llvm.maxnum.f64(double nofpclass(nan inf) [[X:%.*]], double nofpclass(nan inf) [[Y:%.*]])
+// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf nsz contract noundef double @llvm.maxnum.f64(double nofpclass(nan inf) [[X:%.*]], double nofpclass(nan inf) [[Y:%.*]])
 // FINITEONLY-NEXT:    ret double [[TMP0]]
 //
 // APPROX-LABEL: @test_fmax(
 // APPROX-NEXT:  entry:
-// APPROX-NEXT:    [[TMP0:%.*]] = tail call contract noundef double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// APPROX-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // APPROX-NEXT:    ret double [[TMP0]]
 //
 // AMDGCNSPIRV-LABEL: @test_fmax(
 // AMDGCNSPIRV-NEXT:  entry:
-// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call contract noundef addrspace(4) double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef addrspace(4) double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // AMDGCNSPIRV-NEXT:    ret double [[TMP0]]
 //
 extern "C" __device__ double test_fmax(double x, double y) {
@@ -1749,22 +1749,22 @@ extern "C" __device__ double test_fmax(double x, double y) {
 
 // DEFAULT-LABEL: @test_fminf(
 // DEFAULT-NEXT:  entry:
-// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call contract noundef float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // DEFAULT-NEXT:    ret float [[TMP0]]
 //
 // FINITEONLY-LABEL: @test_fminf(
 // FINITEONLY-NEXT:  entry:
-// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf contract noundef float @llvm.minnum.f32(float nofpclass(nan inf) [[X:%.*]], float nofpclass(nan inf) [[Y:%.*]])
+// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf nsz contract noundef float @llvm.minnum.f32(float nofpclass(nan inf) [[X:%.*]], float nofpclass(nan inf) [[Y:%.*]])
 // FINITEONLY-NEXT:    ret float [[TMP0]]
 //
 // APPROX-LABEL: @test_fminf(
 // APPROX-NEXT:  entry:
-// APPROX-NEXT:    [[TMP0:%.*]] = tail call contract noundef float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// APPROX-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // APPROX-NEXT:    ret float [[TMP0]]
 //
 // AMDGCNSPIRV-LABEL: @test_fminf(
 // AMDGCNSPIRV-NEXT:  entry:
-// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call contract noundef addrspace(4) float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef addrspace(4) float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // AMDGCNSPIRV-NEXT:    ret float [[TMP0]]
 //
 extern "C" __device__ float test_fminf(float x, float y) {
@@ -1773,22 +1773,22 @@ extern "C" __device__ float test_fminf(float x, float y) {
 
 // DEFAULT-LABEL: @test_fmin(
 // DEFAULT-NEXT:  entry:
-// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call contract noundef double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // DEFAULT-NEXT:    ret double [[TMP0]]
 //
 // FINITEONLY-LABEL: @test_fmin(
 // FINITEONLY-NEXT:  entry:
-// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf contract noundef double @llvm.minnum.f64(double nofpclass(nan inf) [[X:%.*]], double nofpclass(nan inf) [[Y:%.*]])
+// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf nsz contract noundef double @llvm.minnum.f64(double nofpclass(nan inf) [[X:%.*]], double nofpclass(nan inf) [[Y:%.*]])
 // FINITEONLY-NEXT:    ret double [[TMP0]]
 //
 // APPROX-LABEL: @test_fmin(
 // APPROX-NEXT:  entry:
-// APPROX-NEXT:    [[TMP0:%.*]] = tail call contract noundef double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// APPROX-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // APPROX-NEXT:    ret double [[TMP0]]
 //
 // AMDGCNSPIRV-LABEL: @test_fmin(
 // AMDGCNSPIRV-NEXT:  entry:
-// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call contract noundef addrspace(4) double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef addrspace(4) double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // AMDGCNSPIRV-NEXT:    ret double [[TMP0]]
 //
 extern "C" __device__ double test_fmin(double x, double y) {
@@ -6706,22 +6706,22 @@ extern "C" __device__ double test__fma_rn(double x, double y, double z) {
 
 // DEFAULT-LABEL: @test_float_min(
 // DEFAULT-NEXT:  entry:
-// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call contract noundef float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // DEFAULT-NEXT:    ret float [[TMP0]]
 //
 // FINITEONLY-LABEL: @test_float_min(
 // FINITEONLY-NEXT:  entry:
-// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf contract noundef float @llvm.minnum.f32(float nofpclass(nan inf) [[X:%.*]], float nofpclass(nan inf) [[Y:%.*]])
+// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf nsz contract noundef float @llvm.minnum.f32(float nofpclass(nan inf) [[X:%.*]], float nofpclass(nan inf) [[Y:%.*]])
 // FINITEONLY-NEXT:    ret float [[TMP0]]
 //
 // APPROX-LABEL: @test_float_min(
 // APPROX-NEXT:  entry:
-// APPROX-NEXT:    [[TMP0:%.*]] = tail call contract noundef float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// APPROX-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // APPROX-NEXT:    ret float [[TMP0]]
 //
 // AMDGCNSPIRV-LABEL: @test_float_min(
 // AMDGCNSPIRV-NEXT:  entry:
-// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call contract noundef addrspace(4) float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef addrspace(4) float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // AMDGCNSPIRV-NEXT:    ret float [[TMP0]]
 //
 extern "C" __device__ float test_float_min(float x, float y) {
@@ -6730,22 +6730,22 @@ extern "C" __device__ float test_float_min(float x, float y) {
 
 // DEFAULT-LABEL: @test_float_max(
 // DEFAULT-NEXT:  entry:
-// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call contract noundef float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // DEFAULT-NEXT:    ret float [[TMP0]]
 //
 // FINITEONLY-LABEL: @test_float_max(
 // FINITEONLY-NEXT:  entry:
-// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf contract noundef float @llvm.maxnum.f32(float nofpclass(nan inf) [[X:%.*]], float nofpclass(nan inf) [[Y:%.*]])
+// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf nsz contract noundef float @llvm.maxnum.f32(float nofpclass(nan inf) [[X:%.*]], float nofpclass(nan inf) [[Y:%.*]])
 // FINITEONLY-NEXT:    ret float [[TMP0]]
 //
 // APPROX-LABEL: @test_float_max(
 // APPROX-NEXT:  entry:
-// APPROX-NEXT:    [[TMP0:%.*]] = tail call contract noundef float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// APPROX-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // APPROX-NEXT:    ret float [[TMP0]]
 //
 // AMDGCNSPIRV-LABEL: @test_float_max(
 // AMDGCNSPIRV-NEXT:  entry:
-// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call contract noundef addrspace(4) float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
+// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef addrspace(4) float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
 // AMDGCNSPIRV-NEXT:    ret float [[TMP0]]
 //
 extern "C" __device__ float test_float_max(float x, float y) {
@@ -6754,22 +6754,22 @@ extern "C" __device__ float test_float_max(float x, float y) {
 
 // DEFAULT-LABEL: @test_double_min(
 // DEFAULT-NEXT:  entry:
-// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call contract noundef double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // DEFAULT-NEXT:    ret double [[TMP0]]
 //
 // FINITEONLY-LABEL: @test_double_min(
 // FINITEONLY-NEXT:  entry:
-// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf contract noundef double @llvm.minnum.f64(double nofpclass(nan inf) [[X:%.*]], double nofpclass(nan inf) [[Y:%.*]])
+// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf nsz contract noundef double @llvm.minnum.f64(double nofpclass(nan inf) [[X:%.*]], double nofpclass(nan inf) [[Y:%.*]])
 // FINITEONLY-NEXT:    ret double [[TMP0]]
 //
 // APPROX-LABEL: @test_double_min(
 // APPROX-NEXT:  entry:
-// APPROX-NEXT:    [[TMP0:%.*]] = tail call contract noundef double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// APPROX-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // APPROX-NEXT:    ret double [[TMP0]]
 //
 // AMDGCNSPIRV-LABEL: @test_double_min(
 // AMDGCNSPIRV-NEXT:  entry:
-// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call contract noundef addrspace(4) double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef addrspace(4) double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // AMDGCNSPIRV-NEXT:    ret double [[TMP0]]
 //
 extern "C" __device__ double test_double_min(double x, double y) {
@@ -6778,22 +6778,22 @@ extern "C" __device__ double test_double_min(double x, double y) {
 
 // DEFAULT-LABEL: @test_double_max(
 // DEFAULT-NEXT:  entry:
-// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call contract noundef double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// DEFAULT-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // DEFAULT-NEXT:    ret double [[TMP0]]
 //
 // FINITEONLY-LABEL: @test_double_max(
 // FINITEONLY-NEXT:  entry:
-// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf contract noundef double @llvm.maxnum.f64(double nofpclass(nan inf) [[X:%.*]], double nofpclass(nan inf) [[Y:%.*]])
+// FINITEONLY-NEXT:    [[TMP0:%.*]] = tail call nnan ninf nsz contract noundef double @llvm.maxnum.f64(double nofpclass(nan inf) [[X:%.*]], double nofpclass(nan inf) [[Y:%.*]])
 // FINITEONLY-NEXT:    ret double [[TMP0]]
 //
 // APPROX-LABEL: @test_double_max(
 // APPROX-NEXT:  entry:
-// APPROX-NEXT:    [[TMP0:%.*]] = tail call contract noundef double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// APPROX-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // APPROX-NEXT:    ret double [[TMP0]]
 //
 // AMDGCNSPIRV-LABEL: @test_double_max(
 // AMDGCNSPIRV-NEXT:  entry:
-// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call contract noundef addrspace(4) double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
+// AMDGCNSPIRV-NEXT:    [[TMP0:%.*]] = tail call nsz contract noundef addrspace(4) double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
 // AMDGCNSPIRV-NEXT:    ret double [[TMP0]]
 //
 extern "C" __device__ double test_double_max(double x, double y) {
diff --git a/clang/test/Headers/amdgcn_openmp_device_math_constexpr.cpp b/clang/test/Headers/amdgcn_openmp_device_math_constexpr.cpp
index 0fdc02edc1508..855333132c4de 100644
--- a/clang/test/Headers/amdgcn_openmp_device_math_constexpr.cpp
+++ b/clang/test/Headers/amdgcn_openmp_device_math_constexpr.cpp
@@ -208,7 +208,7 @@ const float constexpr_fmaxf_f32 = fmaxf(2.0f, -4.0f);
 // CHECK-NEXT:    store float -4.000000e+00, ptr [[__Y_ADDR_ASCAST_I]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[__X_ADDR_ASCAST_I]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[__Y_ADDR_ASCAST_I]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = call noundef float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz noundef float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
 // CHECK-NEXT:    store float [[TMP2]], ptr addrspacecast (ptr addrspace(1) @_ZL17constexpr_min_f32 to ptr), align 4
 // CHECK-NEXT:    ret void
 //
@@ -226,7 +226,7 @@ const float constexpr_fmaxf_f32 = fmaxf(2.0f, -4.0f);
 // CHECK-NEXT:    store float -4.000000e+00, ptr [[__Y_ADDR_ASCAST_I]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[__X_ADDR_ASCAST_I]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[__Y_ADDR_ASCAST_I]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = call noundef float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz noundef float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
 // CHECK-NEXT:    store float [[TMP2]], ptr addrspacecast (ptr addrspace(1) @_ZL17constexpr_max_f32 to ptr), align 4
 // CHECK-NEXT:    ret void
 //
@@ -260,7 +260,7 @@ const float constexpr_fmaxf_f32 = fmaxf(2.0f, -4.0f);
 // CHECK-NEXT:    store float -4.000000e+00, ptr [[__Y_ADDR_ASCAST_I]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[__X_ADDR_ASCAST_I]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[__Y_ADDR_ASCAST_I]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = call noundef float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz noundef float @llvm.minnum.f32(float [[TMP0]], float [[TMP1]])
 // CHECK-NEXT:    store float [[TMP2]], ptr addrspacecast (ptr addrspace(1) @_ZL19constexpr_fminf_f32 to ptr), align 4
 // CHECK-NEXT:    ret void
 //
@@ -278,7 +278,7 @@ const float constexpr_fmaxf_f32 = fmaxf(2.0f, -4.0f);
 // CHECK-NEXT:    store float -4.000000e+00, ptr [[__Y_ADDR_ASCAST_I]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[__X_ADDR_ASCAST_I]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[__Y_ADDR_ASCAST_I]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = call noundef float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call nsz noundef float @llvm.maxnum.f32(float [[TMP0]], float [[TMP1]])
 // CHECK-NEXT:    store float [[TMP2]], ptr addrspacecast (ptr addrspace(1) @_ZL19constexpr_fmaxf_f32 to ptr), align 4
 // CHECK-NEXT:    ret void
 //
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 0e68ffadc6939..4eb1897da0786 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -2491,12 +2491,14 @@ class IRBuilderBase {
 public:
   CallInst *CreateCall(FunctionType *FTy, Value *Callee,
                        ArrayRef<Value *> Args = {}, const Twine &Name = "",
-                       MDNode *FPMathTag = nullptr) {
+                       MDNode *FPMathTag = nullptr,
+                       FastMathFlags *uFMF = nullptr) {
     CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
     if (IsFPConstrained)
       setConstrainedFPCallAttr(CI);
-    if (isa<FPMathOperator>(CI))
-      setFPAttrs(CI, FPMathTag, FMF);
+    if (isa<FPMathOperator>(CI)) {
+      setFPAttrs(CI, FPMathTag, uFMF ? (FMF | *uFMF) : FMF);
+    }
     return Insert(CI, Name);
   }
 
@@ -2512,9 +2514,10 @@ class IRBuilderBase {
   }
 
   CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = {},
-                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr,
+                       FastMathFlags *uFMF = nullptr) {
     return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
-                      FPMathTag);
+                      FPMathTag, uFMF);
   }
 
   CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
@@ -2527,7 +2530,8 @@ class IRBuilderBase {
   CallInst *CreateConstrainedFPCall(
       Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
       std::optional<RoundingMode> Rounding = std::nullopt,
-      std::optional<fp::ExceptionBehavior> Except = std::nullopt);
+      std::optional<fp::ExceptionBehavior> Except = std::nullopt,
+      FastMathFlags *FMF = nullptr);
 
   Value *CreateSelect(Value *C, Value *True, Value *False,
                       const Twine &Name = "", Instruction *MDFrom = nullptr);
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index e5a2f08c393c9..cced375191e2a 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -1090,7 +1090,7 @@ CallInst *IRBuilderBase::CreateConstrainedFPCmp(
 CallInst *IRBuilderBase::CreateConstrainedFPCall(
     Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
     std::optional<RoundingMode> Rounding,
-    std::optional<fp::ExceptionBehavior> Except) {
+    std::optional<fp::ExceptionBehavior> Except, FastMathFlags *FMF) {
   llvm::SmallVector<Value *, 6> UseArgs;
 
   append_range(UseArgs, Args);
@@ -1099,7 +1099,7 @@ CallInst *IRBuilderBase::CreateConstrainedFPCall(
     UseArgs.push_back(getConstrainedFPRounding(Rounding));
   UseArgs.push_back(getConstrainedFPExcept(Except));
 
-  CallInst *C = CreateCall(Callee, UseArgs, Name);
+  CallInst *C = CreateCall(Callee, UseArgs, Name, nullptr, FMF);
   setConstrainedFPCallAttr(C);
   return C;
 }



More information about the llvm-commits mailing list