[clang] 7f38812 - [FPEnv][AArch64] Platform-specific builtin constrained FP enablement

Kevin P. Neal via cfe-commits cfe-commits at lists.llvm.org
Fri Apr 10 10:04:24 PDT 2020


Author: Kevin P. Neal
Date: 2020-04-10T13:02:00-04:00
New Revision: 7f38812d5b7e0fd5145e44d5ae831a67d782dca6

URL: https://github.com/llvm/llvm-project/commit/7f38812d5b7e0fd5145e44d5ae831a67d782dca6
DIFF: https://github.com/llvm/llvm-project/commit/7f38812d5b7e0fd5145e44d5ae831a67d782dca6.diff

LOG: [FPEnv][AArch64] Platform-specific builtin constrained FP enablement

When constrained floating point is enabled, the AArch64-specific builtins don't use constrained intrinsics in some cases. Fix that.

Neon is part of this patch, so ARM is affected as well.
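
As a rough, hypothetical illustration (not taken from the patch; it assumes an
AArch64 target with NEON and only sketches the IR the builtin lowers to):
compiling a NEON builtin such as vrndi_f32 with -ffp-exception-behavior=strict
should now produce the constrained nearbyint intrinsic rather than the plain
one, which is what the tests added below check.

  #include <arm_neon.h>

  float32x2_t round_current_mode(float32x2_t v) {
    // default:     call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %v)
    // strict mode: call <2 x float> @llvm.experimental.constrained.nearbyint.v2f32(
    //                  <2 x float> %v, metadata !"round.tonearest", metadata !"fpexcept.strict")
    return vrndi_f32(v);
  }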

Differential Revision: https://reviews.llvm.org/D77074

Added: 
    clang/test/CodeGen/aarch64-neon-intrinsics-constrained.c
    clang/test/CodeGen/aarch64-neon-misc-constrained.c
    clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem-constrained.c
    clang/test/CodeGen/aarch64-v8.2a-fp16-intrinsics-constrained.c
    clang/test/CodeGen/aarch64-v8.2a-neon-intrinsics-constrained.c
    clang/test/CodeGen/arm-neon-directed-rounding-constrained.c
    clang/test/CodeGen/arm64-vrnd-constrained.c

Modified: 
    clang/lib/CodeGen/CGBuiltin.cpp
    llvm/include/llvm/IR/Function.h
    llvm/lib/IR/Function.cpp

Removed: 
    


################################################################################
diff  --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 880fe0e271f5..f985e5848a39 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -411,6 +411,25 @@ static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
   }
 }
 
+// Emit an intrinsic where all operands are of the same type as the result.
+// Depending on mode, this may be a constrained floating-point intrinsic.
+static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
+                                                unsigned IntrinsicID,
+                                                unsigned ConstrainedIntrinsicID,
+                                                llvm::Type *Ty,
+                                                ArrayRef<Value *> Args) {
+  Function *F;
+  if (CGF.Builder.getIsFPConstrained())
+    F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
+  else
+    F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
+
+  if (CGF.Builder.getIsFPConstrained())
+    return CGF.Builder.CreateConstrainedFPCall(F, Args);
+  else
+    return CGF.Builder.CreateCall(F, Args);
+}
+
 // Emit a simple mangled intrinsic that has 1 argument and a return type
 // matching the argument type.
 static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
@@ -4511,13 +4530,20 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                      unsigned shift, bool rightshift) {
   unsigned j = 0;
   for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
-       ai != ae; ++ai, ++j)
+       ai != ae; ++ai, ++j) {
+    if (F->isConstrainedFPIntrinsic())
+      if (ai->getType()->isMetadataTy())
+        continue;
     if (shift > 0 && shift == j)
       Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
     else
       Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
+  }
 
-  return Builder.CreateCall(F, Ops, name);
+  if (F->isConstrainedFPIntrinsic())
+    return Builder.CreateConstrainedFPCall(F, Ops, name);
+  else
+    return Builder.CreateCall(F, Ops, name);
 }
 
 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
@@ -5695,13 +5721,14 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
   }
   case NEON::BI__builtin_neon_vfma_v:
   case NEON::BI__builtin_neon_vfmaq_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
 
     // NEON intrinsic puts accumulator first, unlike the LLVM fma.
-    return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+    return emitCallMaybeConstrainedFPBuiltin(
+        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+        {Ops[1], Ops[2], Ops[0]});
   }
   case NEON::BI__builtin_neon_vld1_v:
   case NEON::BI__builtin_neon_vld1q_v: {
@@ -5859,7 +5886,9 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
   case NEON::BI__builtin_neon_vrndi_v:
   case NEON::BI__builtin_neon_vrndiq_v:
-    Int = Intrinsic::nearbyint;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_nearbyint
+              : Intrinsic::nearbyint;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
   case NEON::BI__builtin_neon_vrshr_n_v:
   case NEON::BI__builtin_neon_vrshrq_n_v:
@@ -8459,18 +8488,20 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   case NEON::BI__builtin_neon_vdivh_f16:
     Ops.push_back(EmitScalarExpr(E->getArg(1)));
     return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
-  case NEON::BI__builtin_neon_vfmah_f16: {
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+  case NEON::BI__builtin_neon_vfmah_f16:
     // NEON intrinsic puts accumulator first, unlike the LLVM fma.
-    return Builder.CreateCall(F,
-      {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
-  }
+    return emitCallMaybeConstrainedFPBuiltin(
+        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
+        {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
   case NEON::BI__builtin_neon_vfmsh_f16: {
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+    // FIXME: This should be an fneg instruction:
     Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
     Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
+
     // NEON intrinsic puts accumulator first, unlike the LLVM fma.
-    return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
+    return emitCallMaybeConstrainedFPBuiltin(
+        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
+        {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
   }
   case NEON::BI__builtin_neon_vaddd_s64:
   case NEON::BI__builtin_neon_vaddd_u64:
@@ -8837,7 +8868,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
     Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
 
     Ops.pop_back();
-    Int = Intrinsic::fma;
+    Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
+                                       : Intrinsic::fma;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
   }
   case NEON::BI__builtin_neon_vfma_laneq_v: {
@@ -8850,11 +8882,12 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
         NeonTypeFlags(NeonTypeFlags::Float64, false, true));
       Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
       Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
-      Function *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
-      Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+      Value *Result;
+      Result = emitCallMaybeConstrainedFPBuiltin(
+          *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
+          DoubleTy, {Ops[1], Ops[2], Ops[0]});
       return Builder.CreateBitCast(Result, Ty);
     }
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
 
@@ -8865,16 +8898,19 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                                cast<ConstantInt>(Ops[3]));
     Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
 
-    return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
+    return emitCallMaybeConstrainedFPBuiltin(
+        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+        {Ops[2], Ops[1], Ops[0]});
   }
   case NEON::BI__builtin_neon_vfmaq_laneq_v: {
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
 
     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
     Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
-    return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
+    return emitCallMaybeConstrainedFPBuiltin(
+        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+        {Ops[2], Ops[1], Ops[0]});
   }
   case NEON::BI__builtin_neon_vfmah_lane_f16:
   case NEON::BI__builtin_neon_vfmas_lane_f32:
@@ -8884,9 +8920,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   case NEON::BI__builtin_neon_vfmad_laneq_f64: {
     Ops.push_back(EmitScalarExpr(E->getArg(3)));
     llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
-    return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+    return emitCallMaybeConstrainedFPBuiltin(
+        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+        {Ops[1], Ops[2], Ops[0]});
   }
   case NEON::BI__builtin_neon_vmull_v:
     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
@@ -8995,27 +9032,37 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
   case NEON::BI__builtin_neon_vrndah_f16: {
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Intrinsic::round;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_round
+              : Intrinsic::round;
     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
   }
   case NEON::BI__builtin_neon_vrnda_v:
   case NEON::BI__builtin_neon_vrndaq_v: {
-    Int = Intrinsic::round;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_round
+              : Intrinsic::round;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
   }
   case NEON::BI__builtin_neon_vrndih_f16: {
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Intrinsic::nearbyint;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_nearbyint
+              : Intrinsic::nearbyint;
     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
   }
   case NEON::BI__builtin_neon_vrndmh_f16: {
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Intrinsic::floor;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_floor
+              : Intrinsic::floor;
     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
   }
   case NEON::BI__builtin_neon_vrndm_v:
   case NEON::BI__builtin_neon_vrndmq_v: {
-    Int = Intrinsic::floor;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_floor
+              : Intrinsic::floor;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
   }
   case NEON::BI__builtin_neon_vrndnh_f16: {
@@ -9035,32 +9082,44 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   }
   case NEON::BI__builtin_neon_vrndph_f16: {
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Intrinsic::ceil;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_ceil
+              : Intrinsic::ceil;
     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
   }
   case NEON::BI__builtin_neon_vrndp_v:
   case NEON::BI__builtin_neon_vrndpq_v: {
-    Int = Intrinsic::ceil;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_ceil
+              : Intrinsic::ceil;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
   }
   case NEON::BI__builtin_neon_vrndxh_f16: {
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Intrinsic::rint;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_rint
+              : Intrinsic::rint;
     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
   }
   case NEON::BI__builtin_neon_vrndx_v:
   case NEON::BI__builtin_neon_vrndxq_v: {
-    Int = Intrinsic::rint;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_rint
+              : Intrinsic::rint;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
   }
   case NEON::BI__builtin_neon_vrndh_f16: {
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Intrinsic::trunc;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_trunc
+              : Intrinsic::trunc;
     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
   }
   case NEON::BI__builtin_neon_vrnd_v:
   case NEON::BI__builtin_neon_vrndq_v: {
-    Int = Intrinsic::trunc;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_trunc
+              : Intrinsic::trunc;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
   }
   case NEON::BI__builtin_neon_vcvt_f64_v:
@@ -9211,12 +9270,16 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   }
   case NEON::BI__builtin_neon_vsqrth_f16: {
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Intrinsic::sqrt;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_sqrt
+              : Intrinsic::sqrt;
     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
   }
   case NEON::BI__builtin_neon_vsqrt_v:
   case NEON::BI__builtin_neon_vsqrtq_v: {
-    Int = Intrinsic::sqrt;
+    Int = Builder.getIsFPConstrained()
+              ? Intrinsic::experimental_constrained_sqrt
+              : Intrinsic::sqrt;
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
   }

diff  --git a/clang/test/CodeGen/aarch64-neon-intrinsics-constrained.c b/clang/test/CodeGen/aarch64-neon-intrinsics-constrained.c
new file mode 100644
index 000000000000..25e205193ca7
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-neon-intrinsics-constrained.c
@@ -0,0 +1,956 @@
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
+// RUN:     -fallow-half-arguments-and-returns -S -disable-O0-optnone \
+// RUN:  -flax-vector-conversions=none -emit-llvm -o - %s | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
+// RUN:     -fallow-half-arguments-and-returns -S -disable-O0-optnone \
+// RUN:  -ffp-exception-behavior=strict \
+// RUN:  -flax-vector-conversions=none -emit-llvm -o - %s | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
+// RUN:     -fallow-half-arguments-and-returns -S -disable-O0-optnone \
+// RUN:  -flax-vector-conversions=none -o - %s \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
+// RUN:     -fallow-half-arguments-and-returns -S -disable-O0-optnone \
+// RUN:  -ffp-exception-behavior=strict \
+// RUN:  -flax-vector-conversions=none -o - %s \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+
+// REQUIRES: aarch64-registered-target
+
+// Fails during instruction selection:
+// XFAIL: *
+
+// Test new aarch64 intrinsics and types but constrained
+
+#include <arm_neon.h>
+
+// COMMON-LABEL: test_vadd_f32
+// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <2 x float> %v1, %v2
+// CONSTRAINED:   [[ADD_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fadd v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      ret <2 x float> [[ADD_I]]
+float32x2_t test_vadd_f32(float32x2_t v1, float32x2_t v2) {
+  return vadd_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vaddq_f32
+// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <4 x float> %v1, %v2
+// CONSTRAINED:   [[ADD_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fadd v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      ret <4 x float> [[ADD_I]]
+float32x4_t test_vaddq_f32(float32x4_t v1, float32x4_t v2) {
+  return vaddq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vsub_f32
+// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <2 x float> %v1, %v2
+// CONSTRAINED:   [[SUB_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fsub v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      ret <2 x float> [[SUB_I]]
+float32x2_t test_vsub_f32(float32x2_t v1, float32x2_t v2) {
+  return vsub_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vsubq_f32
+// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <4 x float> %v1, %v2
+// CONSTRAINED:   [[SUB_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fsub v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      ret <4 x float> [[SUB_I]]
+float32x4_t test_vsubq_f32(float32x4_t v1, float32x4_t v2) {
+  return vsubq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vsubq_f64
+// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <2 x double> %v1, %v2
+// CONSTRAINED:   [[SUB_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fsub v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      ret <2 x double> [[SUB_I]]
+float64x2_t test_vsubq_f64(float64x2_t v1, float64x2_t v2) {
+  return vsubq_f64(v1, v2);
+}
+
+// COMMON-LABEL: test_vmul_f32
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x float> %v1, %v2
+// CONSTRAINED:   [[MUL_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      ret <2 x float> [[MUL_I]]
+float32x2_t test_vmul_f32(float32x2_t v1, float32x2_t v2) {
+  return vmul_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vmulq_f32
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <4 x float> %v1, %v2
+// CONSTRAINED:   [[MUL_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      ret <4 x float> [[MUL_I]]
+float32x4_t test_vmulq_f32(float32x4_t v1, float32x4_t v2) {
+  return vmulq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vmulq_f64
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x double> %v1, %v2
+// CONSTRAINED:   [[MUL_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      ret <2 x double> [[MUL_I]]
+float64x2_t test_vmulq_f64(float64x2_t v1, float64x2_t v2) {
+  return vmulq_f64(v1, v2);
+}
+
+// COMMON-LABEL: test_vmla_f32
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x float> %v2, %v3
+// CONSTRAINED:   [[MUL_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %v2, <2 x float> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul [[MUL_R:v[0-9]+.2s]], v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <2 x float> %v1, [[MUL_I]]
+// CONSTRAINED:   [[ADD_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %v1, <2 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM-NEXT:fadd v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, [[MUL_R]]
+// COMMONIR:      ret <2 x float> [[ADD_I]]
+float32x2_t test_vmla_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) {
+  return vmla_f32(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vmlaq_f32
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <4 x float> %v2, %v3
+// CONSTRAINED:   [[MUL_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %v2, <4 x float> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul [[MUL_R:v[0-9]+.4s]], v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <4 x float> %v1, [[MUL_I]]
+// CONSTRAINED:   [[ADD_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %v1, <4 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM-NEXT:fadd v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, [[MUL_R]]
+// COMMONIR:      ret <4 x float> [[ADD_I]]
+float32x4_t test_vmlaq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) {
+  return vmlaq_f32(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vmlaq_f64
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x double> %v2, %v3
+// CONSTRAINED:   [[MUL_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %v2, <2 x double> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul [[MUL_R:v[0-9]+.2d]], v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <2 x double> %v1, [[MUL_I]]
+// CONSTRAINED:   [[ADD_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %v1, <2 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM-NEXT:fadd v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, [[MUL_R]]
+// COMMONIR:      ret <2 x double> [[ADD_I]]
+float64x2_t test_vmlaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {
+  return vmlaq_f64(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vmls_f32
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x float> %v2, %v3
+// CONSTRAINED:   [[MUL_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %v2, <2 x float> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul [[MUL_R:v[0-9]+.2s]], v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <2 x float> %v1, [[MUL_I]]
+// CONSTRAINED:   [[SUB_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %v1, <2 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM-NEXT:fsub v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, [[MUL_R]]
+// COMMONIR:      ret <2 x float> [[SUB_I]]
+float32x2_t test_vmls_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) {
+  return vmls_f32(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vmlsq_f32
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <4 x float> %v2, %v3
+// CONSTRAINED:   [[MUL_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %v2, <4 x float> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul [[MUL_R:v[0-9]+.4s]], v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <4 x float> %v1, [[MUL_I]]
+// CONSTRAINED:   [[SUB_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %v1, <4 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM-NEXT:fsub v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, [[MUL_R]]
+// COMMONIR:   ret <4 x float> [[SUB_I]]
+float32x4_t test_vmlsq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) {
+  return vmlsq_f32(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vmlsq_f64
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x double> %v2, %v3
+// CONSTRAINED:   [[MUL_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %v2, <2 x double> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul [[MUL_R:v[0-9]+.2d]], v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <2 x double> %v1, [[MUL_I]]
+// CONSTRAINED:   [[SUB_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %v1, <2 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM-NEXT:fsub v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, [[MUL_R]]
+// COMMONIR:      ret <2 x double> [[SUB_I]]
+float64x2_t test_vmlsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {
+  return vmlsq_f64(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vfma_f32
+// COMMONIR:      [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <2 x float> %v3 to <8 x i8>
+// UNCONSTRAINED: [[TMP3:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> %v2, <2 x float> %v3, <2 x float> %v1)
+// CONSTRAINED:   [[TMP3:%.*]] = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> %v2, <2 x float> %v3, <2 x float> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      ret <2 x float> [[TMP3]]
+float32x2_t test_vfma_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) {
+  return vfma_f32(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vfmaq_f32
+// COMMONIR:      [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <4 x float> %v3 to <16 x i8>
+// UNCONSTRAINED: [[TMP3:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %v2, <4 x float> %v3, <4 x float> %v1)
+// CONSTRAINED:   [[TMP3:%.*]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %v2, <4 x float> %v3, <4 x float> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      ret <4 x float> [[TMP3]]
+float32x4_t test_vfmaq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) {
+  return vfmaq_f32(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vfmaq_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <2 x double> %v3 to <16 x i8>
+// UNCONSTRAINED: [[TMP3:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %v2, <2 x double> %v3, <2 x double> %v1)
+// CONSTRAINED:   [[TMP3:%.*]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %v2, <2 x double> %v3, <2 x double> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      ret <2 x double> [[TMP3]]
+float64x2_t test_vfmaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {
+  return vfmaq_f64(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vfms_f32
+// COMMONIR:      [[SUB_I:%.*]] = fneg <2 x float> %v2
+// CHECK-ASM:     fneg v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <2 x float> [[SUB_I]] to <8 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <2 x float> %v3 to <8 x i8>
+// UNCONSTRAINED: [[TMP3:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> [[SUB_I]], <2 x float> %v3, <2 x float> %v1)
+// CONSTRAINED:   [[TMP3:%.*]] = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> [[SUB_I]], <2 x float> %v3, <2 x float> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      ret <2 x float> [[TMP3]]
+float32x2_t test_vfms_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) {
+  return vfms_f32(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vfmsq_f32
+// COMMONIR:      [[SUB_I:%.*]] = fneg <4 x float> %v2
+// CHECK-ASM:     fneg v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <4 x float> [[SUB_I]] to <16 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <4 x float> %v3 to <16 x i8>
+// UNCONSTRAINED: [[TMP3:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[SUB_I]], <4 x float> %v3, <4 x float> %v1)
+// CONSTRAINED:   [[TMP3:%.*]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> [[SUB_I]], <4 x float> %v3, <4 x float> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      ret <4 x float> [[TMP3]]
+float32x4_t test_vfmsq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) {
+  return vfmsq_f32(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vfmsq_f64
+// COMMONIR:      [[SUB_I:%.*]] = fneg <2 x double> %v2
+// CHECK-ASM:     fneg v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <2 x double> [[SUB_I]] to <16 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <2 x double> %v3 to <16 x i8>
+// UNCONSTRAINED: [[TMP3:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> [[SUB_I]], <2 x double> %v3, <2 x double> %v1)
+// CONSTRAINED:   [[TMP3:%.*]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> [[SUB_I]], <2 x double> %v3, <2 x double> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      ret <2 x double> [[TMP3]]
+float64x2_t test_vfmsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {
+  return vfmsq_f64(v1, v2, v3);
+}
+
+// COMMON-LABEL: test_vdivq_f64
+// UNCONSTRAINED: [[DIV_I:%.*]] = fdiv <2 x double> %v1, %v2
+// CONSTRAINED:   [[DIV_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fdiv v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      ret <2 x double> [[DIV_I]]
+float64x2_t test_vdivq_f64(float64x2_t v1, float64x2_t v2) {
+  return vdivq_f64(v1, v2);
+}
+
+// COMMON-LABEL: test_vdivq_f32
+// UNCONSTRAINED: [[DIV_I:%.*]] = fdiv <4 x float> %v1, %v2
+// CONSTRAINED:   [[DIV_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fdiv v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      ret <4 x float> [[DIV_I]]
+float32x4_t test_vdivq_f32(float32x4_t v1, float32x4_t v2) {
+  return vdivq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vdiv_f32
+// UNCONSTRAINED: [[DIV_I:%.*]] = fdiv <2 x float> %v1, %v2
+// CONSTRAINED:   [[DIV_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fdiv v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      ret <2 x float> [[DIV_I]]
+float32x2_t test_vdiv_f32(float32x2_t v1, float32x2_t v2) {
+  return vdiv_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vceq_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oeq <2 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmeq v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
+// COMMONIR:      ret <2 x i32> [[SEXT_I]]
+uint32x2_t test_vceq_f32(float32x2_t v1, float32x2_t v2) {
+  return vceq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vceq_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oeq <1 x double> %a, %b
+// CONSTRAINED:   [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double> %a, <1 x double> %b, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, eq
+// COMMONIR:      [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
+// COMMONIR:      ret <1 x i64> [[SEXT_I]]
+uint64x1_t test_vceq_f64(float64x1_t a, float64x1_t b) {
+  return vceq_f64(a, b);
+}
+
+// COMMON-LABEL: test_vceqq_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oeq <4 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmeq v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
+// COMMONIR:      ret <4 x i32> [[SEXT_I]]
+uint32x4_t test_vceqq_f32(float32x4_t v1, float32x4_t v2) {
+  return vceqq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vceqq_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oeq <2 x double> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmeq v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
+// COMMONIR:      ret <2 x i64> [[SEXT_I]]
+uint64x2_t test_vceqq_f64(float64x2_t v1, float64x2_t v2) {
+  return vceqq_f64(v1, v2);
+}
+
+// COMMON-LABEL: test_vcge_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oge <2 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmge v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
+// COMMONIR:      ret <2 x i32> [[SEXT_I]]
+uint32x2_t test_vcge_f32(float32x2_t v1, float32x2_t v2) {
+  return vcge_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vcge_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oge <1 x double> %a, %b
+// CONSTRAINED:   [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %a, <1 x double> %b, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ge
+// COMMONIR:      [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
+// COMMONIR:      ret <1 x i64> [[SEXT_I]]
+uint64x1_t test_vcge_f64(float64x1_t a, float64x1_t b) {
+  return vcge_f64(a, b);
+}
+
+// COMMON-LABEL: test_vcgeq_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oge <4 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmge v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
+// COMMONIR:      ret <4 x i32> [[SEXT_I]]
+uint32x4_t test_vcgeq_f32(float32x4_t v1, float32x4_t v2) {
+  return vcgeq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vcgeq_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oge <2 x double> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmge v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
+// COMMONIR:      ret <2 x i64> [[SEXT_I]]
+uint64x2_t test_vcgeq_f64(float64x2_t v1, float64x2_t v2) {
+  return vcgeq_f64(v1, v2);
+}
+
+// COMMON-LABEL: test_vcle_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ole <2 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmge v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
+// COMMONIR:      ret <2 x i32> [[SEXT_I]]
+uint32x2_t test_vcle_f32(float32x2_t v1, float32x2_t v2) {
+  return vcle_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vcle_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ole <1 x double> %a, %b
+// CONSTRAINED:   [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %a, <1 x double> %b, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ls
+// COMMONIR:      [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
+// COMMONIR:      ret <1 x i64> [[SEXT_I]]
+uint64x1_t test_vcle_f64(float64x1_t a, float64x1_t b) {
+  return vcle_f64(a, b);
+}
+
+// COMMON-LABEL: test_vcleq_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ole <4 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmge v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
+// COMMONIR:      ret <4 x i32> [[SEXT_I]]
+uint32x4_t test_vcleq_f32(float32x4_t v1, float32x4_t v2) {
+  return vcleq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vcleq_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ole <2 x double> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmge v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
+// COMMONIR:      ret <2 x i64> [[SEXT_I]]
+uint64x2_t test_vcleq_f64(float64x2_t v1, float64x2_t v2) {
+  return vcleq_f64(v1, v2);
+}
+
+// COMMON-LABEL: test_vcgt_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ogt <2 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmgt v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
+// COMMONIR:      ret <2 x i32> [[SEXT_I]]
+uint32x2_t test_vcgt_f32(float32x2_t v1, float32x2_t v2) {
+  return vcgt_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vcgt_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ogt <1 x double> %a, %b
+// CONSTRAINED:   [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %a, <1 x double> %b, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, gt
+// COMMONIR:      [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
+// COMMONIR:      ret <1 x i64> [[SEXT_I]]
+uint64x1_t test_vcgt_f64(float64x1_t a, float64x1_t b) {
+  return vcgt_f64(a, b);
+}
+
+// COMMON-LABEL: test_vcgtq_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ogt <4 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmgt v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
+// COMMONIR:      ret <4 x i32> [[SEXT_I]]
+uint32x4_t test_vcgtq_f32(float32x4_t v1, float32x4_t v2) {
+  return vcgtq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vcgtq_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ogt <2 x double> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmgt v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
+// COMMONIR:      ret <2 x i64> [[SEXT_I]]
+uint64x2_t test_vcgtq_f64(float64x2_t v1, float64x2_t v2) {
+  return vcgtq_f64(v1, v2);
+}
+
+// COMMON-LABEL: test_vclt_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp olt <2 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmgt v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
+// COMMONIR:      ret <2 x i32> [[SEXT_I]]
+uint32x2_t test_vclt_f32(float32x2_t v1, float32x2_t v2) {
+  return vclt_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vclt_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp olt <1 x double> %a, %b
+// CONSTRAINED:   [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %a, <1 x double> %b, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, mi
+// COMMONIR:      [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
+// COMMONIR:      ret <1 x i64> [[SEXT_I]]
+uint64x1_t test_vclt_f64(float64x1_t a, float64x1_t b) {
+  return vclt_f64(a, b);
+}
+
+// COMMON-LABEL: test_vcltq_f32
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp olt <4 x float> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmgt v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
+// COMMONIR:      ret <4 x i32> [[SEXT_I]]
+uint32x4_t test_vcltq_f32(float32x4_t v1, float32x4_t v2) {
+  return vcltq_f32(v1, v2);
+}
+
+// COMMON-LABEL: test_vcltq_f64
+// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp olt <2 x double> %v1, %v2
+// CONSTRAINED:   [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmgt v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
+// COMMONIR:      ret <2 x i64> [[SEXT_I]]
+uint64x2_t test_vcltq_f64(float64x2_t v1, float64x2_t v2) {
+  return vcltq_f64(v1, v2);
+}
+
+// COMMON-LABEL: test_vpadds_f32
+// COMMONIR:      [[LANE0_I:%.*]] = extractelement <2 x float> %a, i64 0
+// COMMONIR:      [[LANE1_I:%.*]] = extractelement <2 x float> %a, i64 1
+// UNCONSTRAINED: [[VPADDD_I:%.*]] = fadd float [[LANE0_I]], [[LANE1_I]]
+// CONSTRAINED:   [[VPADDD_I:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[LANE0_I]], float [[LANE1_I]], metadata !"round.tonearest", metadata !"fpexcept.strict"
+// CHECK-ASM:     fadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+// COMMONIR:      ret float [[VPADDD_I]]
+float32_t test_vpadds_f32(float32x2_t a) {
+  return vpadds_f32(a);
+}
+
+// COMMON-LABEL: test_vpaddd_f64
+// COMMONIR:      [[LANE0_I:%.*]] = extractelement <2 x double> %a, i64 0
+// COMMONIR:      [[LANE1_I:%.*]] = extractelement <2 x double> %a, i64 1
+// UNCONSTRAINED: [[VPADDD_I:%.*]] = fadd double [[LANE0_I]], [[LANE1_I]]
+// CONSTRAINED:   [[VPADDD_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[LANE0_I]], double [[LANE1_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     faddp d{{[0-9]+}}, v{{[0-9]+}}.2d
+// COMMONIR:      ret double [[VPADDD_I]]
+float64_t test_vpaddd_f64(float64x2_t a) {
+  return vpaddd_f64(a);
+}
+
+// COMMON-LABEL: test_vcvts_f32_s32
+// UNCONSTRAINED: [[TMP0:%.*]] = sitofp i32 %a to float
+// CONSTRAINED:   [[TMP0:%.*]] = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     scvtf s{{[0-9]+}}, w{{[0-9]+}}
+// COMMONIR:      ret float [[TMP0]]
+float32_t test_vcvts_f32_s32(int32_t a) {
+  return vcvts_f32_s32(a);
+}
+
+// COMMON-LABEL: test_vcvtd_f64_s64
+// UNCONSTRAINED: [[TMP0:%.*]] = sitofp i64 %a to double
+// CONSTRAINED:   [[TMP0:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     scvtf d{{[0-9]}}, x{{[0-9]+}}
+// COMMONIR:      ret double [[TMP0]]
+float64_t test_vcvtd_f64_s64(int64_t a) {
+  return vcvtd_f64_s64(a);
+}
+
+// COMMON-LABEL: test_vcvts_f32_u32
+// UNCONSTRAINED: [[TMP0:%.*]] = uitofp i32 %a to float
+// CONSTRAINED:   [[TMP0:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     ucvtf s{{[0-9]+}}, w{{[0-9]+}}
+// COMMONIR:      ret float [[TMP0]]
+float32_t test_vcvts_f32_u32(uint32_t a) {
+  return vcvts_f32_u32(a);
+}
+
+// XXX should verify the type of registers
+// COMMON-LABEL: test_vcvtd_f64_u64
+// UNCONSTRAINED: [[TMP0:%.*]] = uitofp i64 %a to double
+// CONSTRAINED:   [[TMP0:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     ucvtf d{{[0-9]}}, x{{[0-9]+}}
+// COMMONIR:      ret double [[TMP0]]
+float64_t test_vcvtd_f64_u64(uint64_t a) {
+  return vcvtd_f64_u64(a);
+}
+
+// COMMON-LABEL: test_vceqs_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oeq float %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, eq
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCMPD_I]]
+uint32_t test_vceqs_f32(float32_t a, float32_t b) {
+  return (uint32_t)vceqs_f32(a, b);
+}
+
+// COMMON-LABEL: test_vceqd_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oeq double %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, eq
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCMPD_I]]
+uint64_t test_vceqd_f64(float64_t a, float64_t b) {
+  return (uint64_t)vceqd_f64(a, b);
+}
+
+// COMMON-LABEL: test_vceqzs_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oeq float %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, eq
+// COMMONIR:      [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCEQZ_I]]
+uint32_t test_vceqzs_f32(float32_t a) {
+  return (uint32_t)vceqzs_f32(a);
+}
+
+// COMMON-LABEL: test_vceqzd_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oeq double %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, eq
+// COMMONIR:      [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCEQZ_I]]
+uint64_t test_vceqzd_f64(float64_t a) {
+  return (uint64_t)vceqzd_f64(a);
+}
+
+// COMMON-LABEL: test_vcges_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oge float %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ge
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCMPD_I]]
+uint32_t test_vcges_f32(float32_t a, float32_t b) {
+  return (uint32_t)vcges_f32(a, b);
+}
+
+// COMMON-LABEL: test_vcged_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oge double %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ge
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCMPD_I]]
+uint64_t test_vcged_f64(float64_t a, float64_t b) {
+  return (uint64_t)vcged_f64(a, b);
+}
+
+// COMMON-LABEL: test_vcgezs_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oge float %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float 0.000000e+00, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ge
+// COMMONIR:      [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCGEZ_I]]
+uint32_t test_vcgezs_f32(float32_t a) {
+  return (uint32_t)vcgezs_f32(a);
+}
+
+// COMMON-LABEL: test_vcgezd_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oge double %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double 0.000000e+00, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ge
+// COMMONIR:      [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCGEZ_I]]
+uint64_t test_vcgezd_f64(float64_t a) {
+  return (uint64_t)vcgezd_f64(a);
+}
+
+// COMMON-LABEL: test_vcgts_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ogt float %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, gt
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCMPD_I]]
+uint32_t test_vcgts_f32(float32_t a, float32_t b) {
+  return (uint32_t)vcgts_f32(a, b);
+}
+
+// COMMON-LABEL: test_vcgtd_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ogt double %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, gt
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCMPD_I]]
+uint64_t test_vcgtd_f64(float64_t a, float64_t b) {
+  return (uint64_t)vcgtd_f64(a, b);
+}
+
+// COMMON-LABEL: test_vcgtzs_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ogt float %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float 0.000000e+00, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, gt
+// COMMONIR:      [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCGTZ_I]]
+uint32_t test_vcgtzs_f32(float32_t a) {
+  return (uint32_t)vcgtzs_f32(a);
+}
+
+// COMMON-LABEL: test_vcgtzd_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ogt double %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double 0.000000e+00, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, gt
+// COMMONIR:      [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCGTZ_I]]
+uint64_t test_vcgtzd_f64(float64_t a) {
+  return (uint64_t)vcgtzd_f64(a);
+}
+
+// COMMON-LABEL: test_vcles_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ole float %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ls
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCMPD_I]]
+uint32_t test_vcles_f32(float32_t a, float32_t b) {
+  return (uint32_t)vcles_f32(a, b);
+}
+
+// COMMON-LABEL: test_vcled_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ole double %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ls
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCMPD_I]]
+uint64_t test_vcled_f64(float64_t a, float64_t b) {
+  return (uint64_t)vcled_f64(a, b);
+}
+
+// COMMON-LABEL: test_vclezs_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ole float %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float 0.000000e+00, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ls
+// COMMONIR:      [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCLEZ_I]]
+uint32_t test_vclezs_f32(float32_t a) {
+  return (uint32_t)vclezs_f32(a);
+}
+
+// COMMON-LABEL: test_vclezd_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ole double %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double 0.000000e+00, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, ls
+// COMMONIR:      [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCLEZ_I]]
+uint64_t test_vclezd_f64(float64_t a) {
+  return (uint64_t)vclezd_f64(a);
+}
+
+// COMMON-LABEL: test_vclts_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp olt float %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, mi
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCMPD_I]]
+uint32_t test_vclts_f32(float32_t a, float32_t b) {
+  return (uint32_t)vclts_f32(a, b);
+}
+
+// COMMON-LABEL: test_vcltd_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp olt double %a, %b
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, d{{[0-9]+}}
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, mi
+// COMMONIR:      [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCMPD_I]]
+uint64_t test_vcltd_f64(float64_t a, float64_t b) {
+  return (uint64_t)vcltd_f64(a, b);
+}
+
+// COMMON-LABEL: test_vcltzs_f32
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp olt float %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float 0.000000e+00, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp s{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, mi
+// COMMONIR:      [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i32
+// COMMONIR:      ret i32 [[VCLTZ_I]]
+uint32_t test_vcltzs_f32(float32_t a) {
+  return (uint32_t)vcltzs_f32(a);
+}
+
+// COMMON-LABEL: test_vcltzd_f64
+// UNCONSTRAINED: [[TMP0:%.*]] = fcmp olt double %a, 0.000000e+00
+// CONSTRAINED:   [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double 0.000000e+00, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcmp d{{[0-9]+}}, #0.0
+// CHECK-ASM-NEXT:cset {{w[0-9]+}}, mi
+// COMMONIR:      [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i64
+// COMMONIR:      ret i64 [[VCLTZ_I]]
+uint64_t test_vcltzd_f64(float64_t a) {
+  return (uint64_t)vcltzd_f64(a);
+}
+
+// COMMON-LABEL: test_vadd_f64
+// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <1 x double> %a, %b
+// CONSTRAINED:   [[ADD_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> %a, <1 x double> %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[ADD_I]]
+float64x1_t test_vadd_f64(float64x1_t a, float64x1_t b) {
+  return vadd_f64(a, b);
+}
+
+// COMMON-LABEL: test_vmul_f64
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <1 x double> %a, %b
+// CONSTRAINED:   [[MUL_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %a, <1 x double> %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[MUL_I]]
+float64x1_t test_vmul_f64(float64x1_t a, float64x1_t b) {
+  return vmul_f64(a, b);
+}
+
+// COMMON-LABEL: test_vdiv_f64
+// UNCONSTRAINED: [[DIV_I:%.*]] = fdiv <1 x double> %a, %b
+// CONSTRAINED:   [[DIV_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double> %a, <1 x double> %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fdiv d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[DIV_I]]
+float64x1_t test_vdiv_f64(float64x1_t a, float64x1_t b) {
+  return vdiv_f64(a, b);
+}
+
+// COMMON-LABEL: test_vmla_f64
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <1 x double> %b, %c
+// CONSTRAINED:   [[MUL_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %b, <1 x double> %c, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <1 x double> %a, [[MUL_I]]
+// CONSTRAINED:   [[ADD_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> %a, <1 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[ADD_I]]
+float64x1_t test_vmla_f64(float64x1_t a, float64x1_t b, float64x1_t c) {
+  return vmla_f64(a, b, c);
+}
+
+// COMMON-LABEL: test_vmls_f64
+// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <1 x double> %b, %c
+// CONSTRAINED:   [[MUL_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %b, <1 x double> %c, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <1 x double> %a, [[MUL_I]]
+// CONSTRAINED:   [[SUB_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> %a, <1 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[SUB_I]]
+float64x1_t test_vmls_f64(float64x1_t a, float64x1_t b, float64x1_t c) {
+  return vmls_f64(a, b, c);
+}
+
+// COMMON-LABEL: test_vfma_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <1 x double> %c to <8 x i8>
+// UNCONSTRAINED: [[TMP3:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
+// CONSTRAINED:   [[TMP3:%.*]] = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[TMP3]]
+float64x1_t test_vfma_f64(float64x1_t a, float64x1_t b, float64x1_t c) {
+  return vfma_f64(a, b, c);
+}
+
+// COMMON-LABEL: test_vfms_f64
+// COMMONIR:      [[SUB_I:%.*]] = fneg <1 x double> %b
+// CHECK-ASM:     fneg d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <1 x double> [[SUB_I]] to <8 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <1 x double> %c to <8 x i8>
+// UNCONSTRAINED: [[TMP3:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> [[SUB_I]], <1 x double> %c, <1 x double> %a)
+// CONSTRAINED:   [[TMP3:%.*]] = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> [[SUB_I]], <1 x double> %c, <1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[TMP3]]
+float64x1_t test_vfms_f64(float64x1_t a, float64x1_t b, float64x1_t c) {
+  return vfms_f64(a, b, c);
+}
+
+// COMMON-LABEL: test_vsub_f64
+// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <1 x double> %a, %b
+// CONSTRAINED:   [[SUB_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> %a, <1 x double> %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[SUB_I]]
+float64x1_t test_vsub_f64(float64x1_t a, float64x1_t b) {
+  return vsub_f64(a, b);
+}
+
+// COMMON-LABEL: test_vcvt_s64_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// UNCONSTRAINED: [[TMP1:%.*]] = fptosi <1 x double> %a to <1 x i64>
+// CONSTRAINED:   [[TMP1:%.*]] = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+// CHECK-ASM:     fcvtzs x{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x i64> [[TMP1]]
+int64x1_t test_vcvt_s64_f64(float64x1_t a) {
+  return vcvt_s64_f64(a);
+}
+
+// COMMON-LABEL: test_vcvt_u64_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// UNCONSTRAINED: [[TMP1:%.*]] = fptoui <1 x double> %a to <1 x i64>
+// CONSTRAINED:   [[TMP1:%.*]] = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+// CHECK-ASM:     fcvtzu x{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x i64> [[TMP1]]
+uint64x1_t test_vcvt_u64_f64(float64x1_t a) {
+  return vcvt_u64_f64(a);
+}
+
+// COMMON-LABEL: test_vcvt_f64_s64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
+// UNCONSTRAINED: [[VCVT_I:%.*]] = sitofp <1 x i64> %a to <1 x double>
+// CONSTRAINED:   [[VCVT_I:%.*]] = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     scvtf d{{[0-9]+}}, x{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[VCVT_I]]
+float64x1_t test_vcvt_f64_s64(int64x1_t a) {
+  return vcvt_f64_s64(a);
+}
+
+// COMMON-LABEL: test_vcvt_f64_u64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
+// UNCONSTRAINED: [[VCVT_I:%.*]] = uitofp <1 x i64> %a to <1 x double>
+// CONSTRAINED:   [[VCVT_I:%.*]] = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     ucvtf d{{[0-9]+}}, x{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[VCVT_I]]
+float64x1_t test_vcvt_f64_u64(uint64x1_t a) {
+  return vcvt_f64_u64(a);
+}
+
+// COMMON-LABEL: test_vrnda_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// UNCONSTRAINED: [[VRNDA1_I:%.*]] = call <1 x double> @llvm.round.v1f64(<1 x double> %a)
+// CONSTRAINED:   [[VRNDA1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+// CHECK-ASM:     frinta d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[VRNDA1_I]]
+float64x1_t test_vrnda_f64(float64x1_t a) {
+  return vrnda_f64(a);
+}
+
+// COMMON-LABEL: test_vrndp_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// UNCONSTRAINED: [[VRNDP1_I:%.*]] = call <1 x double> @llvm.ceil.v1f64(<1 x double> %a)
+// CONSTRAINED:   [[VRNDP1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+// CHECK-ASM:     frintp d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[VRNDP1_I]]
+float64x1_t test_vrndp_f64(float64x1_t a) {
+  return vrndp_f64(a);
+}
+
+// COMMON-LABEL: test_vrndm_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// UNCONSTRAINED: [[VRNDM1_I:%.*]] = call <1 x double> @llvm.floor.v1f64(<1 x double> %a)
+// CONSTRAINED:   [[VRNDM1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+// CHECK-ASM:     frintm d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[VRNDM1_I]]
+float64x1_t test_vrndm_f64(float64x1_t a) {
+  return vrndm_f64(a);
+}
+
+// COMMON-LABEL: test_vrndx_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// UNCONSTRAINED: [[VRNDX1_I:%.*]] = call <1 x double> @llvm.rint.v1f64(<1 x double> %a)
+// CONSTRAINED:   [[VRNDX1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     frintx d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[VRNDX1_I]]
+float64x1_t test_vrndx_f64(float64x1_t a) {
+  return vrndx_f64(a);
+}
+
+// COMMON-LABEL: test_vrnd_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// UNCONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a)
+// CONSTRAINED:   [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+// CHECK-ASM:     frintz d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[VRNDZ1_I]]
+float64x1_t test_vrnd_f64(float64x1_t a) {
+  return vrnd_f64(a);
+}
+
+// COMMON-LABEL: test_vrndi_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// UNCONSTRAINED: [[VRNDI1_I:%.*]] = call <1 x double> @llvm.nearbyint.v1f64(<1 x double> %a)
+// CONSTRAINED:   [[VRNDI1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     frinti d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[VRNDI1_I]]
+float64x1_t test_vrndi_f64(float64x1_t a) {
+  return vrndi_f64(a);
+}
+
+// COMMON-LABEL: test_vsqrt_f64
+// COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// UNCONSTRAINED: [[VSQRT_I:%.*]] = call <1 x double> @llvm.sqrt.v1f64(<1 x double> %a)
+// CONSTRAINED:   [[VSQRT_I:%.*]] = call <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fsqrt d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:      ret <1 x double> [[VSQRT_I]]
+float64x1_t test_vsqrt_f64(float64x1_t a) {
+  return vsqrt_f64(a);
+}

diff  --git a/clang/test/CodeGen/aarch64-neon-misc-constrained.c b/clang/test/CodeGen/aarch64-neon-misc-constrained.c
new file mode 100644
index 000000000000..0385358291c9
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-neon-misc-constrained.c
@@ -0,0 +1,60 @@
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
+// RUN:  -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm -o - %s \
+// RUN: | opt -S -mem2reg | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
+// RUN:  -ffp-exception-behavior=strict \
+// RUN:  -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm -o - %s \
+// RUN: | opt -S -mem2reg | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
+// RUN:  -disable-O0-optnone -fallow-half-arguments-and-returns -S -o - %s \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
+// RUN:  -ffp-exception-behavior=strict \
+// RUN:  -disable-O0-optnone -fallow-half-arguments-and-returns -S -o - %s \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+
+// REQUIRES: aarch64-registered-target
+
+// Test new aarch64 intrinsics and types, but with constrained floating point enabled.
+
+#include <arm_neon.h>
+
+// COMMON-LABEL: test_vrndaq_f64
+// COMMONIR:   [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
+// UNCONSTRAINED: [[VRNDA1_I:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> %a)
+// CONSTRAINED:   [[VRNDA1_I:%.*]] = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %a, metadata !"fpexcept.strict")
+// CHECK-ASM:     frinta v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      ret <2 x double> [[VRNDA1_I]]
+float64x2_t test_vrndaq_f64(float64x2_t a) {
+  return vrndaq_f64(a);
+}
+
+// COMMON-LABEL: test_vrndpq_f64
+// COMMONIR:   [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
+// UNCONSTRAINED: [[VRNDP1_I:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a)
+// CONSTRAINED:   [[VRNDP1_I:%.*]] = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %a, metadata !"fpexcept.strict")
+// CHECK-ASM:     frintp v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      ret <2 x double> [[VRNDP1_I]]
+float64x2_t test_vrndpq_f64(float64x2_t a) {
+  return vrndpq_f64(a);
+}
+
+// COMMON-LABEL: test_vsqrtq_f32
+// COMMONIR:   [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
+// UNCONSTRAINED: [[VSQRT_I:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
+// CONSTRAINED:   [[VSQRT_I:%.*]] = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fsqrt v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      ret <4 x float> [[VSQRT_I]]
+float32x4_t test_vsqrtq_f32(float32x4_t a) {
+  return vsqrtq_f32(a);
+}
+
+// COMMON-LABEL: test_vsqrtq_f64
+// COMMONIR:   [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
+// UNCONSTRAINED: [[VSQRT_I:%.*]] = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a)
+// CONSTRAINED:   [[VSQRT_I:%.*]] = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fsqrt v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+// COMMONIR:      ret <2 x double> [[VSQRT_I]]
+float64x2_t test_vsqrtq_f64(float64x2_t a) {
+  return vsqrtq_f64(a);
+}

diff  --git a/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem-constrained.c b/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem-constrained.c
new file mode 100644
index 000000000000..cbe5627337fd
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem-constrained.c
@@ -0,0 +1,131 @@
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-cpu cyclone \
+// RUN: -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-cpu cyclone \
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-cpu cyclone \
+// RUN: -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | llc -o=- - \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-cpu cyclone \
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | llc -o=- - \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+
+// REQUIRES: aarch64-registered-target
+
+// Test new aarch64 intrinsics and types, but with constrained floating point enabled.
+
+#include <arm_neon.h>
+
+// COMMON-LABEL: test_vfmas_lane_f32
+// COMMONIR:        [[EXTRACT:%.*]] = extractelement <2 x float> %c, i32 1
+// UNCONSTRAINED:   [[TMP2:%.*]] = call float @llvm.fma.f32(float %b, float [[EXTRACT]], float %a)
+// CONSTRAINED:     [[TMP2:%.*]] = call float @llvm.experimental.constrained.fma.f32(float %b, float [[EXTRACT]], float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:       fmla s{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}.s[{{[0-9]+}}]
+// COMMONIR:        ret float [[TMP2]]
+float32_t test_vfmas_lane_f32(float32_t a, float32_t b, float32x2_t c) {
+  return vfmas_lane_f32(a, b, c, 1);
+}
+
+// COMMON-LABEL: test_vfmad_lane_f64
+// COMMONIR:        [[EXTRACT:%.*]] = extractelement <1 x double> %c, i32 0
+// UNCONSTRAINED:   [[TMP2:%.*]] = call double @llvm.fma.f64(double %b, double [[EXTRACT]], double %a)
+// CONSTRAINED:     [[TMP2:%.*]] = call double @llvm.experimental.constrained.fma.f64(double %b, double [[EXTRACT]], double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:       fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:        ret double [[TMP2]]
+float64_t test_vfmad_lane_f64(float64_t a, float64_t b, float64x1_t c) {
+  return vfmad_lane_f64(a, b, c, 0);
+}
+
+// COMMON-LABEL: test_vfmad_laneq_f64
+// COMMONIR:        [[EXTRACT:%.*]] = extractelement <2 x double> %c, i32 1
+// UNCONSTRAINED:   [[TMP2:%.*]] = call double @llvm.fma.f64(double %b, double [[EXTRACT]], double %a)
+// CONSTRAINED:     [[TMP2:%.*]] = call double @llvm.experimental.constrained.fma.f64(double %b, double [[EXTRACT]], double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:       fmla d{{[0-9]+}}, d{{[0-9]+}}, v{{[0-9]+}}.d[{{[0-9]+}}]
+// COMMONIR:        ret double [[TMP2]]
+float64_t test_vfmad_laneq_f64(float64_t a, float64_t b, float64x2_t c) {
+  return vfmad_laneq_f64(a, b, c, 1);
+}
+
+// COMMON-LABEL: test_vfmss_lane_f32
+// COMMONIR:        [[SUB:%.*]] = fneg float %b
+// COMMONIR:        [[EXTRACT:%.*]] = extractelement <2 x float> %c, i32 1
+// UNCONSTRAINED:   [[TMP2:%.*]] = call float @llvm.fma.f32(float [[SUB]], float [[EXTRACT]], float %a)
+// CONSTRAINED:     [[TMP2:%.*]] = call float @llvm.experimental.constrained.fma.f32(float [[SUB]], float [[EXTRACT]], float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:       fmls s{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}.s[{{[0-9]+}}]
+// COMMONIR:        ret float [[TMP2]]
+float32_t test_vfmss_lane_f32(float32_t a, float32_t b, float32x2_t c) {
+  return vfmss_lane_f32(a, b, c, 1);
+}
+
+// COMMON-LABEL: test_vfma_lane_f64
+// COMMONIR:        [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// COMMONIR:        [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8>
+// COMMONIR:        [[TMP2:%.*]] = bitcast <1 x double> %v to <8 x i8>
+// COMMONIR:        [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x double>
+// COMMONIR:        [[LANE:%.*]] = shufflevector <1 x double> [[TMP3]], <1 x double> [[TMP3]], <1 x i32> zeroinitializer
+// COMMONIR:        [[FMLA:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
+// COMMONIR:        [[FMLA1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
+// UNCONSTRAINED:   [[FMLA2:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> [[FMLA]], <1 x double> [[LANE]], <1 x double> [[FMLA1]])
+// CONSTRAINED:     [[FMLA2:%.*]] = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> [[FMLA]], <1 x double> [[LANE]], <1 x double> [[FMLA1]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:       fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:        ret <1 x double> [[FMLA2]]
+float64x1_t test_vfma_lane_f64(float64x1_t a, float64x1_t b, float64x1_t v) {
+  return vfma_lane_f64(a, b, v, 0);
+}
+
+// COMMON-LABEL: test_vfms_lane_f64
+// COMMONIR:        [[SUB:%.*]] = fneg <1 x double> %b
+// COMMONIR:        [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// COMMONIR:        [[TMP1:%.*]] = bitcast <1 x double> [[SUB]] to <8 x i8>
+// COMMONIR:        [[TMP2:%.*]] = bitcast <1 x double> %v to <8 x i8>
+// COMMONIR:        [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x double>
+// COMMONIR:        [[LANE:%.*]] = shufflevector <1 x double> [[TMP3]], <1 x double> [[TMP3]], <1 x i32> zeroinitializer
+// COMMONIR:        [[FMLA:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
+// COMMONIR:        [[FMLA1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
+// UNCONSTRAINED:   [[FMLA2:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> [[FMLA]], <1 x double> [[LANE]], <1 x double> [[FMLA1]])
+// CONSTRAINED:     [[FMLA2:%.*]] = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> [[FMLA]], <1 x double> [[LANE]], <1 x double> [[FMLA1]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:       fmsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:        ret <1 x double> [[FMLA2]]
+float64x1_t test_vfms_lane_f64(float64x1_t a, float64x1_t b, float64x1_t v) {
+  return vfms_lane_f64(a, b, v, 0);
+}
+
+// COMMON-LABEL: test_vfma_laneq_f64
+// COMMONIR:        [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// COMMONIR:        [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8>
+// COMMONIR:        [[TMP2:%.*]] = bitcast <2 x double> %v to <16 x i8>
+// COMMONIR:        [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to double
+// COMMONIR:        [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to double
+// COMMONIR:        [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
+// COMMONIR:        [[EXTRACT:%.*]] = extractelement <2 x double> [[TMP5]], i32 0
+// UNCONSTRAINED:   [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP4]], double [[EXTRACT]], double [[TMP3]])
+// CONSTRAINED:     [[TMP6:%.*]] = call double @llvm.experimental.constrained.fma.f64(double [[TMP4]], double [[EXTRACT]], double [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:       fmla d{{[0-9]+}}, d{{[0-9]+}}, v{{[0-9]+}}.d[{{[0-9]+}}]
+// COMMONIR:        [[TMP7:%.*]] = bitcast double [[TMP6]] to <1 x double>
+// COMMONIR:        ret <1 x double> [[TMP7]]
+float64x1_t test_vfma_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) {
+  return vfma_laneq_f64(a, b, v, 0);
+}
+
+// COMMON-LABEL: test_vfms_laneq_f64
+// COMMONIR:        [[SUB:%.*]] = fneg <1 x double> %b
+// CHECK-ASM:       fneg d{{[0-9]+}}, d{{[0-9]+}}
+// COMMONIR:        [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
+// COMMONIR:        [[TMP1:%.*]] = bitcast <1 x double> [[SUB]] to <8 x i8>
+// COMMONIR:        [[TMP2:%.*]] = bitcast <2 x double> %v to <16 x i8>
+// COMMONIR:        [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to double
+// COMMONIR:        [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to double
+// COMMONIR:        [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
+// COMMONIR:        [[EXTRACT:%.*]] = extractelement <2 x double> [[TMP5]], i32 0
+// UNCONSTRAINED:   [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP4]], double [[EXTRACT]], double [[TMP3]])
+// CONSTRAINED:     [[TMP6:%.*]] = call double @llvm.experimental.constrained.fma.f64(double [[TMP4]], double [[EXTRACT]], double [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:       fmla d{{[0-9]+}}, d{{[0-9]+}}, v{{[0-9]+}}.d[{{[0-9]+}}]
+// COMMONIR:        [[TMP7:%.*]] = bitcast double [[TMP6]] to <1 x double>
+// COMMONIR:        ret <1 x double> [[TMP7]]
+float64x1_t test_vfms_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) {
+  return vfms_laneq_f64(a, b, v, 0);
+}
+

diff  --git a/clang/test/CodeGen/aarch64-v8.2a-fp16-intrinsics-constrained.c b/clang/test/CodeGen/aarch64-v8.2a-fp16-intrinsics-constrained.c
new file mode 100644
index 000000000000..473d3ba53e96
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-v8.2a-fp16-intrinsics-constrained.c
@@ -0,0 +1,366 @@
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 \
+// RUN: -fallow-half-arguments-and-returns -S -disable-O0-optnone \
+// RUN: -emit-llvm -o - %s | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 \
+// RUN: -fallow-half-arguments-and-returns -S -disable-O0-optnone \
+// RUN: -ffp-exception-behavior=strict -emit-llvm -o - %s | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 \
+// RUN: -fallow-half-arguments-and-returns -S -disable-O0-optnone -o - %s \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 \
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -fallow-half-arguments-and-returns -S -disable-O0-optnone -o - %s \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+
+// REQUIRES: aarch64-registered-target
+
+// "Lowering of strict fp16 not yet implemented"
+// XFAIL: *
+
+#include <arm_fp16.h>
+
+// COMMON-LABEL: test_vceqzh_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp oeq half %a, 0xH0000
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half 0xH0000, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, eq
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vceqzh_f16(float16_t a) {
+  return vceqzh_f16(a);
+}
+
+// COMMON-LABEL: test_vcgezh_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp oge half %a, 0xH0000
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half 0xH0000, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, ge
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vcgezh_f16(float16_t a) {
+  return vcgezh_f16(a);
+}
+
+// COMMON-LABEL: test_vcgtzh_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp ogt half %a, 0xH0000
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half 0xH0000, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, gt
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vcgtzh_f16(float16_t a) {
+  return vcgtzh_f16(a);
+}
+
+// COMMON-LABEL: test_vclezh_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp ole half %a, 0xH0000
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half 0xH0000, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, ls
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vclezh_f16(float16_t a) {
+  return vclezh_f16(a);
+}
+
+// COMMON-LABEL: test_vcltzh_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp olt half %a, 0xH0000
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half 0xH0000, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, mi
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vcltzh_f16(float16_t a) {
+  return vcltzh_f16(a);
+}
+
+// COMMON-LABEL: test_vcvth_f16_s16
+// UNCONSTRAINED:  [[VCVT:%.*]] = sitofp i16 %a to half
+// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      scvtf
+// COMMONIR:       ret half [[VCVT]]
+float16_t test_vcvth_f16_s16 (int16_t a) {
+  return vcvth_f16_s16(a);
+}
+
+// COMMON-LABEL: test_vcvth_f16_s32
+// UNCONSTRAINED:  [[VCVT:%.*]] = sitofp i32 %a to half
+// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      scvtf
+// COMMONIR:       ret half [[VCVT]]
+float16_t test_vcvth_f16_s32 (int32_t a) {
+  return vcvth_f16_s32(a);
+}
+
+// COMMON-LABEL: test_vcvth_f16_s64
+// UNCONSTRAINED:  [[VCVT:%.*]] = sitofp i64 %a to half
+// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      scvtf
+// COMMONIR:       ret half [[VCVT]]
+float16_t test_vcvth_f16_s64 (int64_t a) {
+  return vcvth_f16_s64(a);
+}
+
+// COMMON-LABEL: test_vcvth_f16_u16
+// UNCONSTRAINED:  [[VCVT:%.*]] = uitofp i16 %a to half
+// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      ucvtf
+// COMMONIR:       ret half [[VCVT]]
+float16_t test_vcvth_f16_u16 (uint16_t a) {
+  return vcvth_f16_u16(a);
+}
+
+// COMMON-LABEL: test_vcvth_f16_u32
+// UNCONSTRAINED:  [[VCVT:%.*]] = uitofp i32 %a to half
+// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      ucvtf
+// COMMONIR:       ret half [[VCVT]]
+float16_t test_vcvth_f16_u32 (uint32_t a) {
+  return vcvth_f16_u32(a);
+}
+
+// COMMON-LABEL: test_vcvth_f16_u64
+// UNCONSTRAINED:  [[VCVT:%.*]] = uitofp i64 %a to half
+// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      ucvtf
+// COMMONIR:       ret half [[VCVT]]
+float16_t test_vcvth_f16_u64 (uint64_t a) {
+  return vcvth_f16_u64(a);
+}
+
+// COMMON-LABEL: test_vcvth_s16_f16
+// UNCONSTRAINED:  [[VCVT:%.*]] = fptosi half %a to i16
+// CONSTRAINED:    [[VCVT:%.*]] = call i16 @llvm.experimental.constrained.fptosi.i16.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      fcvt  [[CVTREG:s[0-9]+]], {{h[0-9]+}}
+// CHECK-ASM:      fcvtzs {{w[0-9]+}}, [[CVTREG]]
+// COMMONIR:       ret i16 [[VCVT]]
+int16_t test_vcvth_s16_f16 (float16_t a) {
+  return vcvth_s16_f16(a);
+}
+
+// COMMON-LABEL: test_vcvth_s32_f16
+// UNCONSTRAINED:  [[VCVT:%.*]] = fptosi half %a to i32
+// CONSTRAINED:    [[VCVT:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      fcvt  [[CVTREG:s[0-9]+]], {{h[0-9]+}}
+// CHECK-ASM:      fcvtzs {{w[0-9]+}}, [[CVTREG]]
+// COMMONIR:       ret i32 [[VCVT]]
+int32_t test_vcvth_s32_f16 (float16_t a) {
+  return vcvth_s32_f16(a);
+}
+
+// COMMON-LABEL: test_vcvth_s64_f16
+// UNCONSTRAINED:  [[VCVT:%.*]] = fptosi half %a to i64
+// CONSTRAINED:    [[VCVT:%.*]] = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      fcvt  [[CVTREG:s[0-9]+]], {{h[0-9]+}}
+// CHECK-ASM:      fcvtzs {{x[0-9]+}}, [[CVTREG]]
+// COMMONIR:       ret i64 [[VCVT]]
+int64_t test_vcvth_s64_f16 (float16_t a) {
+  return vcvth_s64_f16(a);
+}
+
+// COMMON-LABEL: test_vcvth_u16_f16
+// UNCONSTRAINED:  [[VCVT:%.*]] = fptoui half %a to i16
+// CONSTRAINED:    [[VCVT:%.*]] = call i16 @llvm.experimental.constrained.fptoui.i16.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      fcvt  [[CVTREG:s[0-9]+]], {{h[0-9]+}}
+// CHECK-ASM:      fcvtzu {{w[0-9]+}}, [[CVTREG]]
+// COMMONIR:       ret i16 [[VCVT]]
+uint16_t test_vcvth_u16_f16 (float16_t a) {
+  return vcvth_u16_f16(a);
+}
+
+// COMMON-LABEL: test_vcvth_u32_f16
+// UNCONSTRAINED:  [[VCVT:%.*]] = fptoui half %a to i32
+// CONSTRAINED:    [[VCVT:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      fcvt  [[CVTREG:s[0-9]+]], {{h[0-9]+}}
+// CHECK-ASM:      fcvtzu {{w[0-9]+}}, [[CVTREG]]
+// COMMONIR:       ret i32 [[VCVT]]
+uint32_t test_vcvth_u32_f16 (float16_t a) {
+  return vcvth_u32_f16(a);
+}
+
+// COMMON-LABEL: test_vcvth_u64_f16
+// UNCONSTRAINED:  [[VCVT:%.*]] = fptoui half %a to i64
+// CONSTRAINED:    [[VCVT:%.*]] = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      fcvt  [[CVTREG:s[0-9]+]], {{h[0-9]+}}
+// CHECK-ASM:      fcvtzu {{x[0-9]+}}, [[CVTREG]]
+// COMMONIR:       ret i64 [[VCVT]]
+uint64_t test_vcvth_u64_f16 (float16_t a) {
+  return vcvth_u64_f16(a);
+}
+
+// COMMON-LABEL: test_vrndh_f16
+// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.trunc.f16(half %a)
+// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      frintz
+// COMMONIR:       ret half [[RND]]
+float16_t test_vrndh_f16(float16_t a) {
+  return vrndh_f16(a);
+}
+
+// COMMON-LABEL: test_vrndah_f16
+// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.round.f16(half %a)
+// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.round.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      frinta
+// COMMONIR:       ret half [[RND]]
+float16_t test_vrndah_f16(float16_t a) {
+  return vrndah_f16(a);
+}
+
+// COMMON-LABEL: test_vrndih_f16
+// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.nearbyint.f16(half %a)
+// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.nearbyint.f16(half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      frinti
+// COMMONIR:       ret half [[RND]]
+float16_t test_vrndih_f16(float16_t a) {
+  return vrndih_f16(a);
+}
+
+// COMMON-LABEL: test_vrndmh_f16
+// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.floor.f16(half %a)
+// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.floor.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      frintm
+// COMMONIR:       ret half [[RND]]
+float16_t test_vrndmh_f16(float16_t a) {
+  return vrndmh_f16(a);
+}
+
+// COMMON-LABEL: test_vrndph_f16
+// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.ceil.f16(half %a)
+// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.ceil.f16(half %a, metadata !"fpexcept.strict")
+// CHECK-ASM:      frintp
+// COMMONIR:       ret half [[RND]]
+float16_t test_vrndph_f16(float16_t a) {
+  return vrndph_f16(a);
+}
+
+// COMMON-LABEL: test_vrndxh_f16
+// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.rint.f16(half %a)
+// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.rint.f16(half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      frintx
+// COMMONIR:       ret half [[RND]]
+float16_t test_vrndxh_f16(float16_t a) {
+  return vrndxh_f16(a);
+}
+
+// COMMON-LABEL: test_vsqrth_f16
+// UNCONSTRAINED:  [[SQR:%.*]] = call half @llvm.sqrt.f16(half %a)
+// CONSTRAINED:    [[SQR:%.*]] = call half @llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fsqrt
+// COMMONIR:       ret half [[SQR]]
+float16_t test_vsqrth_f16(float16_t a) {
+  return vsqrth_f16(a);
+}
+
+// COMMON-LABEL: test_vaddh_f16
+// UNCONSTRAINED:  [[ADD:%.*]] = fadd half %a, %b
+// CONSTRAINED:    [[ADD:%.*]] = call half @llvm.experimental.constrained.fadd.f16(half %a, half %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fadd
+// COMMONIR:       ret half [[ADD]]
+float16_t test_vaddh_f16(float16_t a, float16_t b) {
+  return vaddh_f16(a, b);
+}
+
+// COMMON-LABEL: test_vceqh_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp oeq half %a, %b
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, eq
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vceqh_f16(float16_t a, float16_t b) {
+  return vceqh_f16(a, b);
+}
+
+// COMMON-LABEL: test_vcgeh_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp oge half %a, %b
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, ge
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vcgeh_f16(float16_t a, float16_t b) {
+  return vcgeh_f16(a, b);
+}
+
+// COMMON-LABEL: test_vcgth_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp ogt half %a, %b
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, gt
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vcgth_f16(float16_t a, float16_t b) {
+  return vcgth_f16(a, b);
+}
+
+// COMMON-LABEL: test_vcleh_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp ole half %a, %b
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, ls
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vcleh_f16(float16_t a, float16_t b) {
+  return vcleh_f16(a, b);
+}
+
+// COMMON-LABEL: test_vclth_f16
+// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp olt half %a, %b
+// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict")
+// CHECK-ASM:      fcmp
+// CHECK-ASM:      cset {{w[0-9]+}}, mi
+// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
+// COMMONIR:       ret i16 [[TMP2]]
+uint16_t test_vclth_f16(float16_t a, float16_t b) {
+  return vclth_f16(a, b);
+}
+
+// COMMON-LABEL: test_vdivh_f16
+// UNCONSTRAINED:  [[DIV:%.*]] = fdiv half %a, %b
+// CONSTRAINED:    [[DIV:%.*]] = call half @llvm.experimental.constrained.fdiv.f16(half %a, half %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fdiv
+// COMMONIR:       ret half [[DIV]]
+float16_t test_vdivh_f16(float16_t a, float16_t b) {
+  return vdivh_f16(a, b);
+}
+
+// COMMON-LABEL: test_vmulh_f16
+// UNCONSTRAINED:  [[MUL:%.*]] = fmul half %a, %b
+// CONSTRAINED:    [[MUL:%.*]] = call half @llvm.experimental.constrained.fmul.f16(half %a, half %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fmul
+// COMMONIR:       ret half [[MUL]]
+float16_t test_vmulh_f16(float16_t a, float16_t b) {
+  return vmulh_f16(a, b);
+}
+
+// COMMON-LABEL: test_vsubh_f16
+// UNCONSTRAINED:  [[SUB:%.*]] = fsub half %a, %b
+// CONSTRAINED:    [[SUB:%.*]] = call half @llvm.experimental.constrained.fsub.f16(half %a, half %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fsub
+// COMMONIR:       ret half [[SUB]]
+float16_t test_vsubh_f16(float16_t a, float16_t b) {
+  return vsubh_f16(a, b);
+}
+
+// COMMON-LABEL: test_vfmah_f16
+// UNCONSTRAINED:  [[FMA:%.*]] = call half @llvm.fma.f16(half %b, half %c, half %a)
+// CONSTRAINED:    [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half %b, half %c, half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fmadd
+// COMMONIR:       ret half [[FMA]]
+float16_t test_vfmah_f16(float16_t a, float16_t b, float16_t c) {
+  return vfmah_f16(a, b, c);
+}
+
+// COMMON-LABEL: test_vfmsh_f16
+// UNCONSTRAINED:  [[SUB:%.*]] = fsub half 0xH8000, %b
+// CONSTRAINED:    [[SUB:%.*]] = call half @llvm.experimental.constrained.fsub.f16(half 0xH8000, half %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// UNCONSTRAINED:  [[ADD:%.*]] = call half @llvm.fma.f16(half [[SUB]], half %c, half %a)
+// CONSTRAINED:    [[ADD:%.*]] = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half %c, half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fmsub
+// COMMONIR:       ret half [[ADD]]
+float16_t test_vfmsh_f16(float16_t a, float16_t b, float16_t c) {
+  return vfmsh_f16(a, b, c);
+}
+

diff  --git a/clang/test/CodeGen/aarch64-v8.2a-neon-intrinsics-constrained.c b/clang/test/CodeGen/aarch64-v8.2a-neon-intrinsics-constrained.c
new file mode 100644
index 000000000000..d7830f71e2de
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-v8.2a-neon-intrinsics-constrained.c
@@ -0,0 +1,337 @@
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a\
+// RUN: -fallow-half-arguments-and-returns -flax-vector-conversions=none -S -disable-O0-optnone -emit-llvm -o - %s \
+// RUN: | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a\
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -fallow-half-arguments-and-returns -flax-vector-conversions=none -S -disable-O0-optnone -emit-llvm -o - %s \
+// RUN: | opt -S -mem2reg \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a\
+// RUN: -fallow-half-arguments-and-returns -flax-vector-conversions=none -S -disable-O0-optnone -emit-llvm -o - %s \
+// RUN: | opt -S -mem2reg | llc -o=- - \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a\
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -fallow-half-arguments-and-returns -flax-vector-conversions=none -S -disable-O0-optnone -emit-llvm -o - %s \
+// RUN: | opt -S -mem2reg | llc -o=- - \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+
+// REQUIRES: aarch64-registered-target
+
+#include <arm_neon.h>
+
+// COMMON-LABEL: test_vsqrt_f16
+// UNCONSTRAINED:  [[SQR:%.*]] = call <4 x half> @llvm.sqrt.v4f16(<4 x half> %a)
+// CONSTRAINED:    [[SQR:%.*]] = call <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fsqrt v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:       ret <4 x half> [[SQR]]
+float16x4_t test_vsqrt_f16(float16x4_t a) {
+  return vsqrt_f16(a);
+}
+
+// COMMON-LABEL: test_vsqrtq_f16
+// UNCONSTRAINED:  [[SQR:%.*]] = call <8 x half> @llvm.sqrt.v8f16(<8 x half> %a)
+// CONSTRAINED:    [[SQR:%.*]] = call <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fsqrt v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:       ret <8 x half> [[SQR]]
+float16x8_t test_vsqrtq_f16(float16x8_t a) {
+  return vsqrtq_f16(a);
+}
+
+// COMMON-LABEL: test_vfma_f16
+// UNCONSTRAINED:  [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a)
+// CONSTRAINED:    [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:       ret <4 x half> [[ADD]]
+float16x4_t test_vfma_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
+  return vfma_f16(a, b, c);
+}
+
+// COMMON-LABEL: test_vfmaq_f16
+// UNCONSTRAINED:  [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a)
+// CONSTRAINED:    [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:       ret <8 x half> [[ADD]]
+float16x8_t test_vfmaq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
+  return vfmaq_f16(a, b, c);
+}
+
+// COMMON-LABEL: test_vfms_f16
+// COMMONIR:       [[SUB:%.*]] = fneg <4 x half> %b
+// CHECK-ASM:      fneg v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// UNCONSTRAINED:  [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a)
+// CONSTRAINED:    [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:       ret <4 x half> [[ADD]]
+float16x4_t test_vfms_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
+  return vfms_f16(a, b, c);
+}
+
+// COMMON-LABEL: test_vfmsq_f16
+// COMMONIR:       [[SUB:%.*]] = fneg <8 x half> %b
+// CHECK-ASM:      fneg v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// UNCONSTRAINED:  [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a)
+// CONSTRAINED:    [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:      fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:       ret <8 x half> [[ADD]]
+float16x8_t test_vfmsq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
+  return vfmsq_f16(a, b, c);
+}
+
+// COMMON-LABEL: test_vfma_lane_f16
+// COMMONIR:      [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
+// COMMONIR:      [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
+// COMMONIR:      [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+// COMMONIR:      [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
+// COMMONIR:      [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
+// UNCONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
+// CONSTRAINED:   [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:      ret <4 x half> [[FMLA]]
+float16x4_t test_vfma_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
+  return vfma_lane_f16(a, b, c, 3);
+}
+
+// COMMON-LABEL: test_vfmaq_lane_f16
+// COMMONIR:      [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
+// COMMONIR:      [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
+// COMMONIR:      [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+// COMMONIR:      [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
+// COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
+// UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
+// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:      ret <8 x half> [[FMLA]]
+float16x8_t test_vfmaq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
+  return vfmaq_lane_f16(a, b, c, 3);
+}
+
+// COMMON-LABEL: test_vfma_laneq_f16
+// COMMONIR:      [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
+// COMMONIR:      [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
+// COMMONIR:      [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
+// COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
+// COMMONIR:      [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+// UNCONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
+// CONSTRAINED:   [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:      ret <4 x half> [[FMLA]]
+float16x4_t test_vfma_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
+  return vfma_laneq_f16(a, b, c, 7);
+}
+
+// COMMON-LABEL: test_vfmaq_laneq_f16
+// COMMONIR:      [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
+// COMMONIR:      [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
+// COMMONIR:      [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
+// COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
+// COMMONIR:      [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+// UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
+// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:      ret <8 x half> [[FMLA]]
+float16x8_t test_vfmaq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
+  return vfmaq_laneq_f16(a, b, c, 7);
+}
+
+// COMMON-LABEL: test_vfma_n_f16
+// COMMONIR:      [[TMP0:%.*]] = insertelement <4 x half> undef, half %c, i32 0
+// COMMONIR:      [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %c, i32 1
+// COMMONIR:      [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
+// COMMONIR:      [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
+// UNCONSTRAINED: [[FMA:%.*]]  = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a)
+// CONSTRAINED:   [[FMA:%.*]]  = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:      ret <4 x half> [[FMA]]
+float16x4_t test_vfma_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
+  return vfma_n_f16(a, b, c);
+}
+
+// COMMON-LABEL: test_vfmaq_n_f16
+// COMMONIR:      [[TMP0:%.*]] = insertelement <8 x half> undef, half %c, i32 0
+// COMMONIR:      [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %c, i32 1
+// COMMONIR:      [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %c, i32 2
+// COMMONIR:      [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %c, i32 3
+// COMMONIR:      [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %c, i32 4
+// COMMONIR:      [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %c, i32 5
+// COMMONIR:      [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
+// COMMONIR:      [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
+// UNCONSTRAINED: [[FMA:%.*]]  = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a)
+// CONSTRAINED:   [[FMA:%.*]]  = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:      ret <8 x half> [[FMA]]
+float16x8_t test_vfmaq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
+  return vfmaq_n_f16(a, b, c);
+}
+
+// COMMON-LABEL: test_vfmah_lane_f16
+// COMMONIR:      [[EXTR:%.*]] = extractelement <4 x half> %c, i32 3
+// UNCONSTRAINED: [[FMA:%.*]]  = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
+// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmadd h{{[0-9]+}}, h{{[0-9]+}}, h{{[0-9]+}}, h{{[0-9]+}}
+// COMMONIR:      ret half [[FMA]]
+float16_t test_vfmah_lane_f16(float16_t a, float16_t b, float16x4_t c) {
+  return vfmah_lane_f16(a, b, c, 3);
+}
+
+// COMMON-LABEL: test_vfmah_laneq_f16
+// COMMONIR:      [[EXTR:%.*]] = extractelement <8 x half> %c, i32 7
+// UNCONSTRAINED: [[FMA:%.*]]  = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
+// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmadd h{{[0-9]+}}, h{{[0-9]+}}, h{{[0-9]+}}, h{{[0-9]+}}
+// COMMONIR:      ret half [[FMA]]
+float16_t test_vfmah_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
+  return vfmah_laneq_f16(a, b, c, 7);
+}
+
+// COMMON-LABEL: test_vfms_lane_f16
+// COMMONIR:      [[SUB:%.*]]  = fneg <4 x half> %b
+// CHECK-ASM:     fneg v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:      [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <4 x half> [[SUB]] to <8 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
+// COMMONIR:      [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
+// COMMONIR:      [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+// COMMONIR:      [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
+// COMMONIR:      [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
+// UNCONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
+// CONSTRAINED:   [[FMA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:      ret <4 x half> [[FMA]]
+float16x4_t test_vfms_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
+  return vfms_lane_f16(a, b, c, 3);
+}
+
+// COMMON-LABEL: test_vfmsq_lane_f16
+// COMMONIR:      [[SUB:%.*]]  = fneg <8 x half> %b
+// CHECK-ASM:     fneg v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:      [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <8 x half> [[SUB]] to <16 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
+// COMMONIR:      [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
+// COMMONIR:      [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+// COMMONIR:      [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
+// COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
+// UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
+// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:      ret <8 x half> [[FMLA]]
+float16x8_t test_vfmsq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
+  return vfmsq_lane_f16(a, b, c, 3);
+}
+
+// COMMON-LABEL: test_vfms_laneq_f16
+// COMMONIR:      [[SUB:%.*]]  = fneg <4 x half> %b
+// CHECK-ASM-NOT: fneg
+// COMMONIR:      [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <4 x half> [[SUB]] to <8 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
+// COMMONIR:      [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
+// COMMONIR:      [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
+// COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
+// COMMONIR:      [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+// UNCONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
+// CONSTRAINED:   [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmls v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:      ret <4 x half> [[FMLA]]
+float16x4_t test_vfms_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
+  return vfms_laneq_f16(a, b, c, 7);
+}
+
+// COMMON-LABEL: test_vfmsq_laneq_f16
+// COMMONIR:      [[SUB:%.*]]  = fneg <8 x half> %b
+// CHECK-ASM-NOT: fneg
+// COMMONIR:      [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
+// COMMONIR:      [[TMP1:%.*]] = bitcast <8 x half> [[SUB]] to <16 x i8>
+// COMMONIR:      [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
+// COMMONIR:      [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
+// COMMONIR:      [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
+// COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
+// COMMONIR:      [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+// UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
+// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmls v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:      ret <8 x half> [[FMLA]]
+float16x8_t test_vfmsq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
+  return vfmsq_laneq_f16(a, b, c, 7);
+}
+
+// COMMON-LABEL: test_vfms_n_f16
+// COMMONIR:      [[SUB:%.*]]  = fneg <4 x half> %b
+// CHECK-ASM:     fneg v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:      [[TMP0:%.*]] = insertelement <4 x half> undef, half %c, i32 0
+// COMMONIR:      [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %c, i32 1
+// COMMONIR:      [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
+// COMMONIR:      [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
+// UNCONSTRAINED: [[FMA:%.*]]  = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a)
+// CONSTRAINED:   [[FMA:%.*]]  = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+// COMMONIR:      ret <4 x half> [[FMA]]
+float16x4_t test_vfms_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
+  return vfms_n_f16(a, b, c);
+}
+
+// COMMON-LABEL: test_vfmsq_n_f16
+// COMMONIR:      [[SUB:%.*]]  = fneg <8 x half> %b
+// CHECK-ASM:     fneg v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:      [[TMP0:%.*]] = insertelement <8 x half> undef, half %c, i32 0
+// COMMONIR:      [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %c, i32 1
+// COMMONIR:      [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %c, i32 2
+// COMMONIR:      [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %c, i32 3
+// COMMONIR:      [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %c, i32 4
+// COMMONIR:      [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %c, i32 5
+// COMMONIR:      [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
+// COMMONIR:      [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
+// UNCONSTRAINED: [[FMA:%.*]]  = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a)
+// CONSTRAINED:   [[FMA:%.*]]  = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+// COMMONIR:      ret <8 x half> [[FMA]]
+float16x8_t test_vfmsq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
+  return vfmsq_n_f16(a, b, c);
+}
+
+// COMMON-LABEL: test_vfmsh_lane_f16
+// UNCONSTRAINED: [[TMP0:%.*]] = fpext half %b to float
+// CONSTRAINED:   [[TMP0:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half %b, metadata !"fpexcept.strict")
+// CHECK-ASM:     fcvt s{{[0-9]+}}, h{{[0-9]+}}
+// COMMONIR:      [[TMP1:%.*]] = fneg float [[TMP0]]
+// CHECK-ASM:     fneg s{{[0-9]+}}, s{{[0-9]+}}
+// UNCONSTRAINED: [[SUB:%.*]]  = fptrunc float [[TMP1]] to half
+// CONSTRAINED:   [[SUB:%.*]]  = call half @llvm.experimental.constrained.fptrunc.f16.f32(float [[TMP1]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcvt h{{[0-9]+}}, s{{[0-9]+}}
+// COMMONIR:      [[EXTR:%.*]] = extractelement <4 x half> %c, i32 3
+// UNCONSTRAINED: [[FMA:%.*]]  = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
+// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmadd h{{[0-9]+}}, h{{[0-9]+}}, h{{[0-9]+}}, h{{[0-9]+}}
+// COMMONIR:      ret half [[FMA]]
+float16_t test_vfmsh_lane_f16(float16_t a, float16_t b, float16x4_t c) {
+  return vfmsh_lane_f16(a, b, c, 3);
+}
+
+// COMMON-LABEL: test_vfmsh_laneq_f16
+// UNCONSTRAINED: [[TMP0:%.*]] = fpext half %b to float
+// CONSTRAINED:   [[TMP0:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half %b, metadata !"fpexcept.strict")
+// CHECK-ASM:     fcvt s{{[0-9]+}}, h{{[0-9]+}}
+// COMMONIR:      [[TMP1:%.*]] = fneg float [[TMP0]]
+// CHECK-ASM:     fneg s{{[0-9]+}}, s{{[0-9]+}}
+// UNCONSTRAINED: [[SUB:%.*]]  = fptrunc float [[TMP1]] to half
+// CONSTRAINED:   [[SUB:%.*]]  = call half @llvm.experimental.constrained.fptrunc.f16.f32(float [[TMP1]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fcvt h{{[0-9]+}}, s{{[0-9]+}}
+// COMMONIR:      [[EXTR:%.*]] = extractelement <8 x half> %c, i32 7
+// UNCONSTRAINED: [[FMA:%.*]]  = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
+// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM:     fmadd h{{[0-9]+}}, h{{[0-9]+}}, h{{[0-9]+}}, h{{[0-9]+}}
+// COMMONIR:      ret half [[FMA]]
+float16_t test_vfmsh_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
+  return vfmsh_laneq_f16(a, b, c, 7);
+}
+

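(Note, not part of the patch: the vfms* checks above all rely on the same identity. vfms(a, b, c) computes a - b*c with a single rounding, and clang lowers it as an explicit fneg of the multiplicand followed by an fma, so only the sign flip is done outside the fused operation. A scalar C++ sketch of that identity, for reference only:)

// Illustrative only; names and the helper itself are not part of this commit.
#include <cmath>

static float vfms_scalar(float a, float b, float c) {
  // Same value as a - b*c, computed with one rounding step, matching the
  // fneg + llvm.fma / llvm.experimental.constrained.fma pattern checked above.
  return std::fma(-b, c, a);
}
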
diff --git a/clang/test/CodeGen/arm-neon-directed-rounding-constrained.c b/clang/test/CodeGen/arm-neon-directed-rounding-constrained.c
new file mode 100644
index 000000000000..5246993173f8
--- /dev/null
+++ b/clang/test/CodeGen/arm-neon-directed-rounding-constrained.c
@@ -0,0 +1,61 @@
+// RUN: %clang_cc1 -triple thumbv8-linux-gnueabihf -target-cpu cortex-a57 \
+// RUN:     -ffreestanding -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:     opt -S -mem2reg | FileCheck -check-prefixes=COMMON,COMMONIR,UNCONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-linux-gnueabihf -target-feature +neon \
+// RUN:     -ffreestanding -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:     opt -S -mem2reg | FileCheck -check-prefixes=COMMON,COMMONIR,UNCONSTRAINED %s
+
+// RUN: %clang_cc1 -triple thumbv8-linux-gnueabihf -target-cpu cortex-a57 \
+// RUN:     -ffp-exception-behavior=strict \
+// RUN:     -ffreestanding -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:     opt -S -mem2reg | FileCheck -check-prefixes=COMMON,COMMONIR,CONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-linux-gnueabihf -target-feature +neon \
+// RUN:     -ffp-exception-behavior=strict \
+// RUN:     -ffreestanding -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:     opt -S -mem2reg | FileCheck -check-prefixes=COMMON,COMMONIR,CONSTRAINED %s
+
+// RUN: %clang_cc1 -triple thumbv8-linux-gnueabihf -target-cpu cortex-a57 \
+// RUN:     -ffreestanding -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:     opt -S -mem2reg | llc -o=- - | FileCheck -check-prefixes=COMMON,CHECK-ASM32 %s
+// RUN: %clang_cc1 -triple arm64-linux-gnueabihf -target-feature +neon \
+// RUN:     -ffreestanding -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:     opt -S -mem2reg | llc -o=- - | FileCheck -check-prefixes=COMMON,CHECK-ASM64 %s
+
+// RUN: %clang_cc1 -triple thumbv8-linux-gnueabihf -target-cpu cortex-a57 \
+// RUN:     -ffp-exception-behavior=strict \
+// RUN:     -ffreestanding -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:     opt -S -mem2reg | llc -o=- - | FileCheck -check-prefixes=COMMON,CHECK-ASM32 %s
+// RUN: %clang_cc1 -triple arm64-linux-gnueabihf -target-feature +neon \
+// RUN:     -ffp-exception-behavior=strict \
+// RUN:     -ffreestanding -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:     opt -S -mem2reg | llc -o=- - | FileCheck -check-prefixes=COMMON,CHECK-ASM64 %s
+
+// REQUIRES: arm-registered-target,aarch64-registered-target
+
+#include <arm_neon.h>
+
+// COMMON-LABEL: test_vrndi_f32
+// COMMONIR:      [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
+// UNCONSTRAINED: [[VRNDI1_I:%.*]] = call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %a)
+// CONSTRAINED:   [[VRNDI1_I:%.*]] = call <2 x float> @llvm.experimental.constrained.nearbyint.v2f32(<2 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM32:   vrintr.f32 s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM32:   vrintr.f32 s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM64:   frinti v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+// COMMONIR:      ret <2 x float> [[VRNDI1_I]]
+float32x2_t test_vrndi_f32(float32x2_t a) {
+  return vrndi_f32(a);
+}
+
+// COMMON-LABEL: test_vrndiq_f32
+// COMMONIR:      [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
+// UNCONSTRAINED: [[VRNDI1_I:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a)
+// CONSTRAINED:   [[VRNDI1_I:%.*]] = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK-ASM32:   vrintr.f32 s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM32:   vrintr.f32 s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM32:   vrintr.f32 s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM32:   vrintr.f32 s{{[0-9]+}}, s{{[0-9]+}}
+// CHECK-ASM64:   frinti v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+// COMMONIR:      ret <4 x float> [[VRNDI1_I]]
+float32x4_t test_vrndiq_f32(float32x4_t a) {
+  return vrndiq_f32(a);
+}

diff --git a/clang/test/CodeGen/arm64-vrnd-constrained.c b/clang/test/CodeGen/arm64-vrnd-constrained.c
new file mode 100644
index 000000000000..bbded8f2c7f6
--- /dev/null
+++ b/clang/test/CodeGen/arm64-vrnd-constrained.c
@@ -0,0 +1,43 @@
+// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -flax-vector-conversions=none -emit-llvm -o - %s \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -flax-vector-conversions=none -ffp-exception-behavior=strict -emit-llvm -o - %s \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -flax-vector-conversions=none -emit-llvm -o - %s | llc -o=- - \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -flax-vector-conversions=none -ffp-exception-behavior=strict -emit-llvm -o - %s | llc -o=- - \
+// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+
+// REQUIRES: aarch64-registered-target
+
+#include <arm_neon.h>
+
+float64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); }
+// COMMON-LABEL: rnd5
+// UNCONSTRAINED: call <2 x double> @llvm.trunc.v2f64(<2 x double>
+// CONSTRAINED:   call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>
+// CHECK-ASM:     frintz.2d v{{[0-9]+}}, v{{[0-9]+}}
+
+float64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); }
+// COMMON-LABEL: rnd13
+// UNCONSTRAINED: call <2 x double> @llvm.floor.v2f64(<2 x double>
+// CONSTRAINED:   call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>
+// CHECK-ASM:     frintm.2d v{{[0-9]+}}, v{{[0-9]+}}
+
+float64x2_t rnd18(float64x2_t a) { return vrndpq_f64(a); }
+// COMMON-LABEL: rnd18
+// UNCONSTRAINED: call <2 x double> @llvm.ceil.v2f64(<2 x double>
+// CONSTRAINED:   call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>
+// CHECK-ASM:     frintp.2d v{{[0-9]+}}, v{{[0-9]+}}
+
+float64x2_t rnd22(float64x2_t a) { return vrndaq_f64(a); }
+// COMMON-LABEL: rnd22
+// UNCONSTRAINED: call <2 x double> @llvm.round.v2f64(<2 x double>
+// CONSTRAINED:   call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>
+// CHECK-ASM:     frinta.2d v{{[0-9]+}}, v{{[0-9]+}}
+
+float64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); }
+// COMMON-LABEL: rnd25
+// UNCONSTRAINED: call <2 x double> @llvm.rint.v2f64(<2 x double>
+// CONSTRAINED:   call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>
+// CHECK-ASM:     frintx.2d v{{[0-9]+}}, v{{[0-9]+}}
+

diff --git a/llvm/include/llvm/IR/Function.h b/llvm/include/llvm/IR/Function.h
index 2e3ea1400d9a..8d139c88a814 100644
--- a/llvm/include/llvm/IR/Function.h
+++ b/llvm/include/llvm/IR/Function.h
@@ -197,6 +197,11 @@ class Function : public GlobalObject, public ilist_node<Function> {
   /// returns Intrinsic::not_intrinsic!
   bool isIntrinsic() const { return HasLLVMReservedName; }
 
+  /// Returns true if the function is one of the "Constrained Floating-Point
+  /// Intrinsics". Returns false if not, and returns false when
+  /// getIntrinsicID() returns Intrinsic::not_intrinsic.
+  bool isConstrainedFPIntrinsic() const;
+
   static Intrinsic::ID lookupIntrinsicID(StringRef Name);
 
   /// Recalculate the ID for this function if it is an Intrinsic defined

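(For context, a minimal sketch of querying the new predicate; the helper below is hypothetical and is not part of this commit, it only shows that the method is a simple const query on any llvm::Function:)

// Hypothetical diagnostic helper, not in this patch: count the declared
// constrained floating-point intrinsics in a module.
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

static unsigned countConstrainedFPIntrinsics(const llvm::Module &M) {
  unsigned Count = 0;
  for (const llvm::Function &F : M)
    if (F.isIntrinsic() && F.isConstrainedFPIntrinsic())
      ++Count; // e.g. declarations of llvm.experimental.constrained.fma.*
  return Count;
}
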
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
index 5212a6eab780..9e3a837f06f1 100644
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -320,6 +320,18 @@ static MutableArrayRef<Argument> makeArgArray(Argument *Args, size_t Count) {
   return MutableArrayRef<Argument>(Args, Count);
 }
 
+bool Function::isConstrainedFPIntrinsic() const {
+  switch (getIntrinsicID()) {
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
+  case Intrinsic::INTRINSIC:
+#include "llvm/IR/ConstrainedOps.def"
+    return true;
+#undef INSTRUCTION
+  default:
+    return false;
+  }
+}
+
 void Function::clearArguments() {
   for (Argument &A : makeArgArray(Arguments, NumArgs)) {
     A.setName("");

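(The switch body above is generated by the INSTRUCTION X-macro: each entry in ConstrainedOps.def contributes one case label, and all of them fall through to return true. An abridged, illustrative view of what the preprocessor produces; the authoritative list is whatever the .def file contains at a given revision:)

// Illustrative expansion only; see llvm/IR/ConstrainedOps.def for the real list.
bool Function::isConstrainedFPIntrinsic() const {
  switch (getIntrinsicID()) {
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fptrunc:
  case Intrinsic::experimental_constrained_nearbyint:
  // ... one case per INSTRUCTION entry in ConstrainedOps.def ...
    return true;
  default:
    return false;
  }
}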
