[clang] 2e667d0 - [FPEnv][SystemZ] Platform-specific builtin constrained FP enablement

Kevin P. Neal via cfe-commits <cfe-commits at lists.llvm.org>
Tue Jan 21 09:45:10 PST 2020


Author: Kevin P. Neal
Date: 2020-01-21T12:44:39-05:00
New Revision: 2e667d07c773f684ea893b9ce5d9b73e9f23b438

URL: https://github.com/llvm/llvm-project/commit/2e667d07c773f684ea893b9ce5d9b73e9f23b438
DIFF: https://github.com/llvm/llvm-project/commit/2e667d07c773f684ea893b9ce5d9b73e9f23b438.diff

LOG: [FPEnv][SystemZ] Platform-specific builtin constrained FP enablement

When constrained floating point is enabled, the SystemZ-specific builtins
don't use constrained intrinsics in some cases. Fix that.

Differential Revision: https://reviews.llvm.org/D72722
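
As a minimal illustration (not part of the commit; assumes a clang build with
the SystemZ target registered), a reproducer in the spirit of the new tests:

// Compile with (roughly):
//   clang -cc1 -triple s390x-ibm-linux -target-cpu z13 \
//     -ffp-exception-behavior=strict -emit-llvm example.c -o -
typedef __attribute__((vector_size(16))) double vec_double;

volatile vec_double vd;

void example(void) {
  // With strict exception behavior this builtin now lowers to
  // @llvm.experimental.constrained.sqrt.v2f64 (with rounding/exception
  // metadata) instead of the plain @llvm.sqrt.v2f64 emitted before the patch.
  vd = __builtin_s390_vfsqdb(vd);
}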

Added: 
    clang/test/CodeGen/builtins-systemz-vector-constrained.c
    clang/test/CodeGen/builtins-systemz-vector2-constrained.c
    clang/test/CodeGen/builtins-systemz-zvector-constrained.c
    clang/test/CodeGen/builtins-systemz-zvector2-constrained.c
    clang/test/CodeGen/builtins-systemz-zvector3-constrained.c

Modified: 
    clang/lib/CodeGen/CGBuiltin.cpp

Removed: 
    


################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 8d00d3d64f5c..29eebbb403ea 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -13310,8 +13310,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
   case SystemZ::BI__builtin_s390_vfsqdb: {
     llvm::Type *ResultType = ConvertType(E->getType());
     Value *X = EmitScalarExpr(E->getArg(0));
-    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
-    return Builder.CreateCall(F, X);
+    if (Builder.getIsFPConstrained()) {
+      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
+      return Builder.CreateConstrainedFPCall(F, { X });
+    } else {
+      Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
+      return Builder.CreateCall(F, X);
+    }
   }
   case SystemZ::BI__builtin_s390_vfmasb:
   case SystemZ::BI__builtin_s390_vfmadb: {
@@ -13319,8 +13324,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
     Value *X = EmitScalarExpr(E->getArg(0));
     Value *Y = EmitScalarExpr(E->getArg(1));
     Value *Z = EmitScalarExpr(E->getArg(2));
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
-    return Builder.CreateCall(F, {X, Y, Z});
+    if (Builder.getIsFPConstrained()) {
+      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+      return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
+    } else {
+      Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+      return Builder.CreateCall(F, {X, Y, Z});
+    }
   }
   case SystemZ::BI__builtin_s390_vfmssb:
   case SystemZ::BI__builtin_s390_vfmsdb: {
@@ -13328,8 +13338,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
     Value *X = EmitScalarExpr(E->getArg(0));
     Value *Y = EmitScalarExpr(E->getArg(1));
     Value *Z = EmitScalarExpr(E->getArg(2));
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
-    return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+    if (Builder.getIsFPConstrained()) {
+      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+      return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+    } else {
+      Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+      return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+    }
   }
   case SystemZ::BI__builtin_s390_vfnmasb:
   case SystemZ::BI__builtin_s390_vfnmadb: {
@@ -13337,8 +13352,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
     Value *X = EmitScalarExpr(E->getArg(0));
     Value *Y = EmitScalarExpr(E->getArg(1));
     Value *Z = EmitScalarExpr(E->getArg(2));
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
-    return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
+    if (Builder.getIsFPConstrained()) {
+      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+      return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y,  Z}), "neg");
+    } else {
+      Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+      return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
+    }
   }
   case SystemZ::BI__builtin_s390_vfnmssb:
   case SystemZ::BI__builtin_s390_vfnmsdb: {
@@ -13346,9 +13366,15 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
     Value *X = EmitScalarExpr(E->getArg(0));
     Value *Y = EmitScalarExpr(E->getArg(1));
     Value *Z = EmitScalarExpr(E->getArg(2));
-    Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
-    Value *NegZ = Builder.CreateFNeg(Z, "neg");
-    return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
+    if (Builder.getIsFPConstrained()) {
+      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+      Value *NegZ = Builder.CreateFNeg(Z, "sub");
+      return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
+    } else {
+      Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+      Value *NegZ = Builder.CreateFNeg(Z, "neg");
+      return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
+    }
   }
   case SystemZ::BI__builtin_s390_vflpsb:
   case SystemZ::BI__builtin_s390_vflpdb: {
@@ -13377,30 +13403,42 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
     // Check whether this instance can be represented via a LLVM standard
     // intrinsic.  We only support some combinations of M4 and M5.
     Intrinsic::ID ID = Intrinsic::not_intrinsic;
+    Intrinsic::ID CI;
     switch (M4.getZExtValue()) {
     default: break;
     case 0:  // IEEE-inexact exception allowed
       switch (M5.getZExtValue()) {
       default: break;
-      case 0: ID = Intrinsic::rint; break;
+      case 0: ID = Intrinsic::rint;
+              CI = Intrinsic::experimental_constrained_rint; break;
       }
       break;
     case 4:  // IEEE-inexact exception suppressed
       switch (M5.getZExtValue()) {
       default: break;
-      case 0: ID = Intrinsic::nearbyint; break;
-      case 1: ID = Intrinsic::round; break;
-      case 5: ID = Intrinsic::trunc; break;
-      case 6: ID = Intrinsic::ceil; break;
-      case 7: ID = Intrinsic::floor; break;
+      case 0: ID = Intrinsic::nearbyint;
+              CI = Intrinsic::experimental_constrained_nearbyint; break;
+      case 1: ID = Intrinsic::round;
+              CI = Intrinsic::experimental_constrained_round; break;
+      case 5: ID = Intrinsic::trunc;
+              CI = Intrinsic::experimental_constrained_trunc; break;
+      case 6: ID = Intrinsic::ceil;
+              CI = Intrinsic::experimental_constrained_ceil; break;
+      case 7: ID = Intrinsic::floor;
+              CI = Intrinsic::experimental_constrained_floor; break;
       }
       break;
     }
     if (ID != Intrinsic::not_intrinsic) {
-      Function *F = CGM.getIntrinsic(ID, ResultType);
-      return Builder.CreateCall(F, X);
+      if (Builder.getIsFPConstrained()) {
+        Function *F = CGM.getIntrinsic(CI, ResultType);
+        return Builder.CreateConstrainedFPCall(F, X);
+      } else {
+        Function *F = CGM.getIntrinsic(ID, ResultType);
+        return Builder.CreateCall(F, X);
+      }
     }
-    switch (BuiltinID) {
+    switch (BuiltinID) { // FIXME: constrained version?
       case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
       case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
       default: llvm_unreachable("Unknown BuiltinID");
@@ -13423,13 +13461,20 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
     // Check whether this instance can be represented via a LLVM standard
     // intrinsic.  We only support some values of M4.
     Intrinsic::ID ID = Intrinsic::not_intrinsic;
+    Intrinsic::ID CI;
     switch (M4.getZExtValue()) {
     default: break;
-    case 4: ID = Intrinsic::maxnum; break;
+    case 4: ID = Intrinsic::maxnum;
+            CI = Intrinsic::experimental_constrained_maxnum; break;
     }
     if (ID != Intrinsic::not_intrinsic) {
-      Function *F = CGM.getIntrinsic(ID, ResultType);
-      return Builder.CreateCall(F, {X, Y});
+      if (Builder.getIsFPConstrained()) {
+        Function *F = CGM.getIntrinsic(CI, ResultType);
+        return Builder.CreateConstrainedFPCall(F, {X, Y});
+      } else {
+        Function *F = CGM.getIntrinsic(ID, ResultType);
+        return Builder.CreateCall(F, {X, Y});
+      }
     }
     switch (BuiltinID) {
       case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
@@ -13453,13 +13498,20 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
     // Check whether this instance can be represented via a LLVM standard
     // intrinsic.  We only support some values of M4.
     Intrinsic::ID ID = Intrinsic::not_intrinsic;
+    Intrinsic::ID CI;
     switch (M4.getZExtValue()) {
     default: break;
-    case 4: ID = Intrinsic::minnum; break;
+    case 4: ID = Intrinsic::minnum;
+            CI = Intrinsic::experimental_constrained_minnum; break;
     }
     if (ID != Intrinsic::not_intrinsic) {
-      Function *F = CGM.getIntrinsic(ID, ResultType);
-      return Builder.CreateCall(F, {X, Y});
+      if (Builder.getIsFPConstrained()) {
+        Function *F = CGM.getIntrinsic(CI, ResultType);
+        return Builder.CreateConstrainedFPCall(F, {X, Y});
+      } else {
+        Function *F = CGM.getIntrinsic(ID, ResultType);
+        return Builder.CreateCall(F, {X, Y});
+      }
     }
     switch (BuiltinID) {
       case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;

diff --git a/clang/test/CodeGen/builtins-systemz-vector-constrained.c b/clang/test/CodeGen/builtins-systemz-vector-constrained.c
new file mode 100644
index 000000000000..6d2845504a39
--- /dev/null
+++ b/clang/test/CodeGen/builtins-systemz-vector-constrained.c
@@ -0,0 +1,55 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z13 -triple s390x-ibm-linux -flax-vector-conversions=none \
+// RUN: -ffp-exception-behavior=strict -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+
+typedef __attribute__((vector_size(16))) signed long long vec_slong;
+typedef __attribute__((vector_size(16))) double vec_double;
+
+volatile vec_slong vsl;
+volatile vec_double vd;
+
+int cc;
+
+void test_float(void) {
+  vsl = __builtin_s390_vfcedbs(vd, vd, &cc);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  vsl = __builtin_s390_vfchdbs(vd, vd, &cc);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  vsl = __builtin_s390_vfchedbs(vd, vd, &cc);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+  vsl = __builtin_s390_vftcidb(vd, 0, &cc);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 0)
+  vsl = __builtin_s390_vftcidb(vd, 4095, &cc);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
+
+  vd = __builtin_s390_vfsqdb(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %{{.*}})
+
+  vd = __builtin_s390_vfmadb(vd, vd, vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+  vd = __builtin_s390_vfmsdb(vd, vd, vd);
+  // CHECK: [[NEG:%[^ ]+]] = fneg <2 x double> {{.*}}
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], {{.*}})
+
+  vd = __builtin_s390_vflpdb(vd);
+  // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+  vd = __builtin_s390_vflndb(vd);
+  // CHECK: [[ABS:%[^ ]+]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+  // CHECK: fneg <2 x double> [[ABS]]
+
+  vd = __builtin_s390_vfidb(vd, 0, 0);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %{{.*}})
+  vd = __builtin_s390_vfidb(vd, 4, 0);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %{{.*}})
+  vd = __builtin_s390_vfidb(vd, 4, 1);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %{{.*}})
+  vd = __builtin_s390_vfidb(vd, 4, 5);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}})
+  vd = __builtin_s390_vfidb(vd, 4, 6);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}})
+  vd = __builtin_s390_vfidb(vd, 4, 7);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}})
+  vd = __builtin_s390_vfidb(vd, 4, 4);
+  // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
+}

diff --git a/clang/test/CodeGen/builtins-systemz-vector2-constrained.c b/clang/test/CodeGen/builtins-systemz-vector2-constrained.c
new file mode 100644
index 000000000000..735b6a0249ab
--- /dev/null
+++ b/clang/test/CodeGen/builtins-systemz-vector2-constrained.c
@@ -0,0 +1,69 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z14 -triple s390x-ibm-linux -flax-vector-conversions=none \
+// RUN: -ffp-exception-behavior=strict -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+
+typedef __attribute__((vector_size(16))) double vec_double;
+typedef __attribute__((vector_size(16))) float vec_float;
+
+volatile vec_double vd;
+volatile vec_float vf;
+
+void test_float(void) {
+  vd = __builtin_s390_vfmaxdb(vd, vd, 4);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+  vd = __builtin_s390_vfmindb(vd, vd, 4);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  vd = __builtin_s390_vfmindb(vd, vd, 0);
+
+  vd = __builtin_s390_vfnmadb(vd, vd, vd);
+  // CHECK: [[RES:%[^ ]+]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK: fneg <2 x double> [[RES]]
+
+  vd = __builtin_s390_vfnmsdb(vd, vd, vd);
+  // CHECK: [[NEG:%[^ ]+]] = fneg <2 x double> {{.*}}
+  // CHECK:  [[RES:%[^ ]+]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
+  // CHECK: fneg <2 x double> [[RES]]
+
+  vf = __builtin_s390_vfmaxsb(vf, vf, 4);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+
+  vf = __builtin_s390_vfminsb(vf, vf, 4);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+
+  vf = __builtin_s390_vfsqsb(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+
+  vf = __builtin_s390_vfmasb(vf, vf, vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  vf = __builtin_s390_vfmssb(vf, vf, vf);
+  // CHECK: [[NEG:%[^ ]+]] = fneg <4 x float> %{{.*}}
+  // CHECK: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
+  vf = __builtin_s390_vfnmasb(vf, vf, vf);
+  // CHECK: [[RES:%[^ ]+]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: fneg <4 x float> [[RES]]
+  vf = __builtin_s390_vfnmssb(vf, vf, vf);
+  // CHECK: [[NEG:%[^ ]+]] = fneg <4 x float> %{{.*}}
+  // CHECK: [[RES:%[^ ]+]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
+  // CHECK: fneg <4 x float> [[RES]]
+
+  vf = __builtin_s390_vflpsb(vf);
+  // CHECK: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{.*}})
+  vf = __builtin_s390_vflnsb(vf);
+  // CHECK: [[ABS:%[^ ]+]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{.*}})
+  // CHECK: fneg <4 x float> [[ABS]]
+
+  vf = __builtin_s390_vfisb(vf, 0, 0);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  vf = __builtin_s390_vfisb(vf, 4, 0);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  vf = __builtin_s390_vfisb(vf, 4, 1);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  vf = __builtin_s390_vfisb(vf, 4, 5);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  vf = __builtin_s390_vfisb(vf, 4, 6);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  vf = __builtin_s390_vfisb(vf, 4, 7);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+}
+

diff --git a/clang/test/CodeGen/builtins-systemz-zvector-constrained.c b/clang/test/CodeGen/builtins-systemz-zvector-constrained.c
new file mode 100644
index 000000000000..42c70ce49f65
--- /dev/null
+++ b/clang/test/CodeGen/builtins-systemz-zvector-constrained.c
@@ -0,0 +1,317 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -flax-vector-conversions=none \
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -flax-vector-conversions=none \
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -Wall -Wno-unused -Werror -S %s -o - | FileCheck %s --check-prefix=CHECK-ASM
+
+#include <vecintrin.h>
+
+volatile vector signed long long vsl;
+volatile vector unsigned long long vul;
+volatile vector bool long long vbl;
+volatile vector double vd;
+
+volatile double d;
+
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile int idx;
+
+void test_core(void) {
+  // CHECK-ASM-LABEL: test_core
+
+  d = vec_extract(vd, idx);
+  // CHECK: extractelement <2 x double> %{{.*}}, i32 %{{.*}}
+  // CHECK-ASM: vlgvg
+
+  vd = vec_insert(d, vd, idx);
+  // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 %{{.*}}
+  // CHECK-ASM: vlvgg
+
+  vd = vec_promote(d, idx);
+  // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 %{{.*}}
+  // CHECK-ASM: vlvgg
+
+  vd = vec_insert_and_zero(cptrd);
+  // CHECK: [[ZVEC:%[^ ]+]] = insertelement <2 x double> <double undef, double 0.000000e+00>, double {{.*}}, i32 0
+  // CHECK-ASM: vllezg
+
+  vd = vec_revb(vd);
+  // CHECK-ASM: vperm
+
+  vd = vec_reve(vd);
+  // CHECK-ASM: {{vperm|vpdi}}
+
+  vd = vec_sel(vd, vd, vul);
+  // CHECK-ASM: vsel
+  vd = vec_sel(vd, vd, vbl);
+  // CHECK-ASM: vsel
+
+  vd = vec_gather_element(vd, vul, cptrd, 0);
+  // CHECK-ASM: vgeg %{{.*}}, 0(%{{.*}},%{{.*}}), 0
+  vd = vec_gather_element(vd, vul, cptrd, 1);
+  // CHECK-ASM: vgeg %{{.*}}, 0(%{{.*}},%{{.*}}), 1
+
+  vec_scatter_element(vd, vul, ptrd, 0);
+  // CHECK-ASM: vsceg %{{.*}}, 0(%{{.*}},%{{.*}}), 0
+  vec_scatter_element(vd, vul, ptrd, 1);
+  // CHECK-ASM: vsceg %{{.*}}, 0(%{{.*}},%{{.*}}), 1
+
+  vd = vec_xl(idx, cptrd);
+  // CHECK-ASM: vl
+
+  vd = vec_xld2(idx, cptrd);
+  // CHECK-ASM: vl
+
+  vec_xst(vd, idx, ptrd);
+  // CHECK-ASM: vst
+
+  vec_xstd2(vd, idx, ptrd);
+  // CHECK-ASM: vst
+
+  vd = vec_splat(vd, 0);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+  // CHECK-ASM: vrepg
+  vd = vec_splat(vd, 1);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+  // CHECK-ASM: vrepg
+
+  vd = vec_splats(d);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+  // CHECK-ASM: vlrepg
+
+  vd = vec_mergeh(vd, vd);
+  // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
+  // CHECK-ASM: vmrhg
+
+  vd = vec_mergel(vd, vd);
+  // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <i32 1, i32 3>
+  // CHECK-ASM: vmrlg
+}
+
+void test_compare(void) {
+  // CHECK-ASM-LABEL: test_compare
+
+  vbl = vec_cmpeq(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"oeq", metadata !{{.*}})
+  // CHECK-ASM: vfcedb
+
+  vbl = vec_cmpge(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"oge", metadata !{{.*}})
+  // CHECK-ASM: kdbr
+  // CHECK-ASM: kdbr
+  // CHECK-ASM: vst
+
+  vbl = vec_cmpgt(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"ogt", metadata !{{.*}})
+  // CHECK-ASM: kdbr
+  // CHECK-ASM: kdbr
+  // CHECK-ASM: vst
+
+  vbl = vec_cmple(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"ole", metadata !{{.*}})
+  // CHECK-ASM: kdbr
+  // CHECK-ASM: kdbr
+  // CHECK-ASM: vst
+
+  vbl = vec_cmplt(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"olt", metadata !{{.*}})
+  // CHECK-ASM: kdbr
+  // CHECK-ASM: kdbr
+  // CHECK-ASM: vst
+
+  idx = vec_all_lt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_all_nge(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+  idx = vec_all_ngt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+  idx = vec_all_nle(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+  idx = vec_all_nlt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_all_nan(vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcidb
+  idx = vec_all_numeric(vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcidb
+
+  idx = vec_any_eq(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfcedbs
+
+  idx = vec_any_ne(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfcedbs
+
+  idx = vec_any_ge(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_any_gt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_any_le(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_any_lt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_any_nge(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+  idx = vec_any_ngt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+  idx = vec_any_nle(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+  idx = vec_any_nlt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_any_nan(vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcidb
+  idx = vec_any_numeric(vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcidb
+}
+
+void test_float(void) {
+  // CHECK-ASM-LABEL: test_float
+
+  vd = vec_abs(vd);
+  // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+  // CHECK-ASM: vflpdb
+
+  vd = vec_nabs(vd);
+  // CHECK: [[ABS:%[^ ]+]] = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+  // CHECK-NEXT: fneg <2 x double> [[ABS]]
+  // CHECK-ASM: vflndb
+
+  vd = vec_madd(vd, vd, vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadb
+  vd = vec_msub(vd, vd, vd);
+  // CHECK: [[NEG:%[^ ]+]] = fneg <2 x double> %{{.*}}
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
+  // CHECK-ASM: vfmsdb
+  vd = vec_sqrt(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfsqdb
+
+  vd = vec_ld2f(cptrf);
+  // CHECK: [[VAL:%[^ ]+]] = load <2 x float>, <2 x float>* %{{.*}}
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float> [[VAL]], metadata !{{.*}})
+  // (emulated)
+  vec_st2f(vd, ptrf);
+  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: store <2 x float> [[VAL]], <2 x float>* %{{.*}}
+  // (emulated)
+
+  vd = vec_ctd(vsl, 0);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // (emulated)
+  vd = vec_ctd(vul, 0);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // (emulated)
+  vd = vec_ctd(vsl, 1);
+  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[VAL]], <2 x double> <double 5.000000e-01, double 5.000000e-01>, metadata !{{.*}})
+  // (emulated)
+  vd = vec_ctd(vul, 1);
+  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[VAL]], <2 x double> <double 5.000000e-01, double 5.000000e-01>, metadata !{{.*}})
+  // (emulated)
+  vd = vec_ctd(vsl, 31);
+  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[VAL]], <2 x double> <double 0x3E00000000000000, double 0x3E00000000000000>, metadata !{{.*}})
+  // (emulated)
+  vd = vec_ctd(vul, 31);
+  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[VAL]], <2 x double> <double 0x3E00000000000000, double 0x3E00000000000000>, metadata !{{.*}})
+  // (emulated)
+
+  vsl = vec_ctsl(vd, 0);
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // (emulated)
+  vul = vec_ctul(vd, 0);
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // (emulated)
+  vsl = vec_ctsl(vd, 1);
+  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> {{.*}}, <2 x double> <double 2.000000e+00, double 2.000000e+00>, metadata !{{.*}})
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> [[VAL]], metadata !{{.*}})
+  // (emulated)
+  vul = vec_ctul(vd, 1);
+  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %{{.*}}, <2 x double> <double 2.000000e+00, double 2.000000e+00>, metadata !{{.*}})
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> [[VAL]], metadata !{{.*}})
+  // (emulated)
+  vsl = vec_ctsl(vd, 31);
+  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %{{.*}}, <2 x double> <double 0x41E0000000000000, double 0x41E0000000000000>, metadata !{{.*}})
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> [[VAL]], metadata !{{.*}})
+  // (emulated)
+  vul = vec_ctul(vd, 31);
+  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %{{.*}}, <2 x double> <double 0x41E0000000000000, double 0x41E0000000000000>, metadata !{{.*}})
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> [[VAL]], metadata !{{.*}})
+  // (emulated)
+
+  vd = vec_double(vsl);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcdgb
+  vd = vec_double(vul);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcdlgb
+
+  vsl = vec_signed(vd);
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcgdb
+  vul = vec_unsigned(vd);
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vclgdb
+
+  vd = vec_roundp(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 6
+  vd = vec_ceil(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 6
+  vd = vec_roundm(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
+  vd = vec_floor(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
+  vd = vec_roundz(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
+  vd = vec_trunc(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
+  vd = vec_roundc(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 0
+  vd = vec_rint(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 0, 0
+  vd = vec_round(vd);
+}

diff --git a/clang/test/CodeGen/builtins-systemz-zvector2-constrained.c b/clang/test/CodeGen/builtins-systemz-zvector2-constrained.c
new file mode 100644
index 000000000000..8f94fb7b9e2e
--- /dev/null
+++ b/clang/test/CodeGen/builtins-systemz-zvector2-constrained.c
@@ -0,0 +1,543 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z14 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -flax-vector-conversions=none \
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -target-cpu z14 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -flax-vector-conversions=none \
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -Wall -Wno-unused -Werror -S %s -o - | FileCheck %s --check-prefix=CHECK-ASM
+
+#include <vecintrin.h>
+
+volatile vector signed long long vsl;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector bool int vbi;
+volatile vector bool long long vbl;
+volatile vector float vf;
+volatile vector double vd;
+
+volatile float f;
+volatile double d;
+
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile int idx;
+int cc;
+
+void test_core(void) {
+  // CHECK-ASM-LABEL: test_core
+  vector float vf2;
+  vector double vd2;
+
+  f = vec_extract(vf, 0);
+  // CHECK: extractelement <4 x float> %{{.*}}, i32 0
+  // CHECK-ASM: vstef
+  f = vec_extract(vf, idx);
+  // CHECK: extractelement <4 x float> %{{.*}}, i32 %{{.*}}
+  // CHECK-ASM: vlgvf
+  d = vec_extract(vd, 0);
+  // CHECK: extractelement <2 x double> %{{.*}}, i32 0
+  // CHECK-ASM: vsteg
+  d = vec_extract(vd, idx);
+  // CHECK: extractelement <2 x double> %{{.*}}, i32 %{{.*}}
+  // CHECK-ASM: vlgvg
+
+  vf2 = vf;
+  vf = vec_insert(f, vf2, 0);
+  // CHECK: insertelement <4 x float> %{{.*}}, float %{{.*}}, i32 0
+  // CHECK-ASM: vlef
+  vf = vec_insert(0.0f, vf, 1);
+  // CHECK: insertelement <4 x float> %{{.*}}, float 0.000000e+00, i32 1
+  // CHECK-ASM: vleif %{{.*}}, 0, 1
+  vf = vec_insert(f, vf, idx);
+  // CHECK: insertelement <4 x float> %{{.*}}, float %{{.*}}, i32 %{{.*}}
+  // CHECK-ASM: vlvgf
+  vd2 = vd;
+  vd = vec_insert(d, vd2, 0);
+  // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 0
+  // CHECK-ASM: vleg
+  vd = vec_insert(0.0, vd, 1);
+  // CHECK: insertelement <2 x double> %{{.*}}, double 0.000000e+00, i32 1
+  // CHECK-ASM: vleig %{{.*}}, 0, 1
+  vd = vec_insert(d, vd, idx);
+  // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 %{{.*}}
+  // CHECK-ASM: vlvgg
+
+  vf = vec_promote(f, idx);
+  // CHECK: insertelement <4 x float> undef, float %{{.*}}, i32 %{{.*}}
+  // CHECK-ASM: vlvgf
+  vd = vec_promote(d, idx);
+  // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 %{{.*}}
+  // CHECK-ASM: vlvgg
+
+  vf = vec_insert_and_zero(cptrf);
+  // CHECK: insertelement <4 x float> <float 0.000000e+00, float undef, float 0.000000e+00, float 0.000000e+00>, float {{.*}}, i32 1
+  // CHECK-ASM: vllezf
+  vd = vec_insert_and_zero(cptrd);
+  // CHECK: insertelement <2 x double> <double undef, double 0.000000e+00>, double %{{.*}}, i32 0
+  // CHECK-ASM: vllezg
+
+  vf = vec_revb(vf);
+  // CHECK-ASM: vperm
+  vd = vec_revb(vd);
+  // CHECK-ASM: vperm
+
+  vf = vec_reve(vf);
+  // CHECK-ASM: vperm
+  vd = vec_reve(vd);
+  // CHECK-ASM: {{vperm|vpdi}}
+
+  vf = vec_gather_element(vf, vui, cptrf, 0);
+  // CHECK-ASM: vgef %{{.*}}, 0(%{{.*}},%{{.*}}), 0
+  vf = vec_gather_element(vf, vui, cptrf, 1);
+  // CHECK-ASM: vgef %{{.*}}, 0(%{{.*}},%{{.*}}), 1
+  vf = vec_gather_element(vf, vui, cptrf, 2);
+  // CHECK-ASM: vgef %{{.*}}, 0(%{{.*}},%{{.*}}), 2
+  vf = vec_gather_element(vf, vui, cptrf, 3);
+  // CHECK-ASM: vgef %{{.*}}, 0(%{{.*}},%{{.*}}), 3
+  vd = vec_gather_element(vd, vul, cptrd, 0);
+  // CHECK-ASM: vgeg %{{.*}}, 0(%{{.*}},%{{.*}}), 0
+  vd = vec_gather_element(vd, vul, cptrd, 1);
+  // CHECK-ASM: vgeg %{{.*}}, 0(%{{.*}},%{{.*}}), 1
+
+  vec_scatter_element(vf, vui, ptrf, 0);
+  // CHECK-ASM: vscef %{{.*}}, 0(%{{.*}},%{{.*}}), 0
+  vec_scatter_element(vf, vui, ptrf, 1);
+  // CHECK-ASM: vscef %{{.*}}, 0(%{{.*}},%{{.*}}), 1
+  vec_scatter_element(vf, vui, ptrf, 2);
+  // CHECK-ASM: vscef %{{.*}}, 0(%{{.*}},%{{.*}}), 2
+  vec_scatter_element(vf, vui, ptrf, 3);
+  // CHECK-ASM: vscef %{{.*}}, 0(%{{.*}},%{{.*}}), 3
+  vec_scatter_element(vd, vul, ptrd, 0);
+  // CHECK-ASM: vsceg %{{.*}}, 0(%{{.*}},%{{.*}}), 0
+  vec_scatter_element(vd, vul, ptrd, 1);
+  // CHECK-ASM: vsceg %{{.*}}, 0(%{{.*}},%{{.*}}), 1
+
+  vf = vec_xl(idx, cptrf);
+  // CHECK-ASM: vl
+  vd = vec_xl(idx, cptrd);
+  // CHECK-ASM: vl
+
+  vec_xst(vf, idx, ptrf);
+  // CHECK-ASM: vst
+  vec_xst(vd, idx, ptrd);
+  // CHECK-ASM: vst
+
+  vf = vec_splat(vf, 0);
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> zeroinitializer
+  // CHECK-ASM: vrepf
+  vf = vec_splat(vf, 1);
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  // CHECK-ASM: vrepf
+  vd = vec_splat(vd, 0);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+  // CHECK-ASM: vrepg
+  vd = vec_splat(vd, 1);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+  // CHECK-ASM: vrepg
+
+  vf = vec_splats(f);
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> zeroinitializer
+  // CHECK-ASM: vlrepf
+  vd = vec_splats(d);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+  // CHECK-ASM: vlrepg
+
+  vf = vec_mergeh(vf, vf);
+  // shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+  // CHECK-ASM: vmrhf
+  vd = vec_mergeh(vd, vd);
+  // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
+  // CHECK-ASM: vmrhg
+
+  vf = vec_mergel(vf, vf);
+  // shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <i32 2, i32 6, i32 3, i32 7>
+  // CHECK-ASM: vmrlf
+  vd = vec_mergel(vd, vd);
+  // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <i32 1, i32 3>
+  // CHECK-ASM: vmrlg
+}
+
+void test_compare(void) {
+  // CHECK-ASM-LABEL: test_compare
+
+  vbi = vec_cmpeq(vf, vf);
+  // CHECK: call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"oeq", metadata !{{.*}})
+  // CHECK-ASM: vfcesb
+  vbl = vec_cmpeq(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"oeq", metadata !{{.*}})
+  // CHECK-ASM: vfcedb
+
+  vbi = vec_cmpge(vf, vf);
+  // CHECK: call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"oge", metadata !{{.*}})
+  // CHECK-ASM: vfkhesb
+  vbl = vec_cmpge(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"oge", metadata !{{.*}})
+  // CHECK-ASM: vfkhedb
+
+  vbi = vec_cmpgt(vf, vf);
+  // CHECK: call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"ogt", metadata !{{.*}})
+  // CHECK-ASM: vfkhsb
+  vbl = vec_cmpgt(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"ogt", metadata !{{.*}})
+  // CHECK-ASM: vfkhdb
+
+  vbi = vec_cmple(vf, vf);
+  // CHECK: call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"ole", metadata !{{.*}})
+  // CHECK-ASM: vfkhesb
+  vbl = vec_cmple(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"ole", metadata !{{.*}})
+  // CHECK-ASM: vfkhedb
+
+  vbi = vec_cmplt(vf, vf);
+  // CHECK: call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"olt", metadata !{{.*}})
+  // CHECK-ASM: vfkhsb
+  vbl = vec_cmplt(vd, vd);
+  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"olt", metadata !{{.*}})
+  // CHECK-ASM: vfkhdb
+
+  idx = vec_all_eq(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfcesbs
+  idx = vec_all_eq(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfcedbs
+
+  idx = vec_all_ne(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfcesbs
+  idx = vec_all_ne(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfcedbs
+
+  idx = vec_all_ge(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchesbs
+  idx = vec_all_ge(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_all_gt(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchsbs
+  idx = vec_all_gt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_all_le(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchesbs
+  idx = vec_all_le(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_all_lt(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchsbs
+  idx = vec_all_lt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_all_nge(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchesbs
+  idx = vec_all_nge(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_all_ngt(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchsbs
+  idx = vec_all_ngt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_all_nle(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchesbs
+  idx = vec_all_nle(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_all_nlt(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchsbs
+  idx = vec_all_nlt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_all_nan(vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcisb
+  idx = vec_all_nan(vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcidb
+
+  idx = vec_all_numeric(vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcisb
+  idx = vec_all_numeric(vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcidb
+
+  idx = vec_any_eq(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfcesbs
+  idx = vec_any_eq(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfcedbs
+
+  idx = vec_any_ne(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfcesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfcesbs
+  idx = vec_any_ne(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfcedbs
+
+  idx = vec_any_ge(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchesbs
+  idx = vec_any_ge(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_any_gt(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchsbs
+  idx = vec_any_gt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_any_le(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchesbs
+  idx = vec_any_le(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_any_lt(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchsbs
+  idx = vec_any_lt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_any_nge(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchesbs
+  idx = vec_any_nge(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_any_ngt(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchsbs
+  idx = vec_any_ngt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_any_nle(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchesbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchesbs
+  idx = vec_any_nle(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchedbs
+
+  idx = vec_any_nlt(vf, vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfchsbs(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK-ASM: vfchsbs
+  idx = vec_any_nlt(vd, vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK-ASM: vfchdbs
+
+  idx = vec_any_nan(vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcisb
+  idx = vec_any_nan(vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcidb
+
+  idx = vec_any_numeric(vf);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcisb
+  idx = vec_any_numeric(vd);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+  // CHECK-ASM: vftcidb
+}
+
+void test_float(void) {
+  // CHECK-ASM-LABEL: test_float
+
+  vf = vec_abs(vf);
+  // CHECK: call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{.*}})
+  // CHECK-ASM: vflpsb
+  vd = vec_abs(vd);
+  // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+  // CHECK-ASM: vflpdb
+
+  vf = vec_nabs(vf);
+  // CHECK: [[ABS:%[^ ]+]] = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %{{.*}})
+  // CHECK-NEXT: fneg <4 x float> [[ABS]]
+  // CHECK-ASM: vflnsb
+  vd = vec_nabs(vd);
+  // CHECK: [[ABS:%[^ ]+]] = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+  // CHECK-NEXT: fneg <2 x double> [[ABS]]
+  // CHECK-ASM: vflndb
+
+  vf = vec_max(vf, vf);
+  // CHECK: call <4 x float> @llvm.s390.vfmaxsb(<4 x float> %{{.*}}, <4 x float> %{{.*}}, i32 0)
+  // CHECK-ASM: vfmaxsb
+  vd = vec_max(vd, vd);
+  // CHECK: call <2 x double> @llvm.s390.vfmaxdb(<2 x double> %{{.*}}, <2 x double> %{{.*}}, i32 0)
+  // CHECK-ASM: vfmaxdb
+
+  vf = vec_min(vf, vf);
+  // CHECK: call <4 x float> @llvm.s390.vfminsb(<4 x float> %{{.*}}, <4 x float> %{{.*}}, i32 0)
+  // CHECK-ASM: vfminsb
+  vd = vec_min(vd, vd);
+  // CHECK: call <2 x double> @llvm.s390.vfmindb(<2 x double> %{{.*}}, <2 x double> %{{.*}}, i32 0)
+  // CHECK-ASM: vfmindb
+
+  vf = vec_madd(vf, vf, vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmasb
+  vd = vec_madd(vd, vd, vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadb
+
+  vf = vec_msub(vf, vf, vf);
+  // CHECK: [[NEG:%[^ ]+]] = fneg <4 x float> %{{.*}}
+  // CHECK: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
+  // CHECK-ASM: vfmssb
+  vd = vec_msub(vd, vd, vd);
+  // CHECK: [[NEG:%[^ ]+]] = fneg <2 x double> %{{.*}}
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
+  // CHECK-ASM: vfmsdb
+
+  vf = vec_nmadd(vf, vf, vf);
+  // CHECK: [[RES:%[^ ]+]] = tail call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: fneg <4 x float> [[RES]]
+  // CHECK-ASM: vfnmasb
+  vd = vec_nmadd(vd, vd, vd);
+  // CHECK: [[RES:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: fneg <2 x double> [[RES]]
+  // CHECK-ASM: vfnmadb
+
+  vf = vec_nmsub(vf, vf, vf);
+  // CHECK: [[NEG:%[^ ]+]] = fneg <4 x float> %{{.*}}
+  // CHECK: [[RES:%[^ ]+]] = tail call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
+  // CHECK: fneg <4 x float> [[RES]]
+  // CHECK-ASM: vfnmssb
+  vd = vec_nmsub(vd, vd, vd);
+  // CHECK: [[NEG:%[^ ]+]] = fneg <2 x double> %{{.*}}
+  // CHECK: [[RES:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
+  // CHECK: fneg <2 x double> [[RES]]
+  // CHECK-ASM: vfnmsdb
+
+  vf = vec_sqrt(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfsqsb
+  vd = vec_sqrt(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfsqdb
+
+  vd = vec_doublee(vf);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vldeb
+  vf = vec_floate(vd);
+  // CHECK: call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vledb
+
+  vd = vec_double(vsl);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcdgb
+  vd = vec_double(vul);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcdlgb
+
+  vsl = vec_signed(vd);
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcgdb
+  vul = vec_unsigned(vd);
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vclgdb
+
+  vf = vec_roundp(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 6
+  vf = vec_ceil(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 6
+  vd = vec_roundp(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 6
+  vd = vec_ceil(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 6
+
+  vf = vec_roundm(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 7
+  vf = vec_floor(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 7
+  vd = vec_roundm(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
+  vd = vec_floor(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
+
+  vf = vec_roundz(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
+  vf = vec_trunc(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
+  vd = vec_roundz(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
+  vd = vec_trunc(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
+
+  vf = vec_roundc(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 0
+  vd = vec_roundc(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 0
+
+  vf = vec_rint(vf);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 0, 0
+  vd = vec_rint(vd);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 0, 0
+
+  vf = vec_round(vf);
+  // CHECK: call <4 x float> @llvm.s390.vfisb(<4 x float> %{{.*}}, i32 4, i32 4)
+  // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 4
+  vd = vec_round(vd);
+  // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
+  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 4
+
+  vbi = vec_fp_test_data_class(vf, 0, &cc);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 0)
+  // CHECK-ASM: vftcisb
+  vbi = vec_fp_test_data_class(vf, 4095, &cc);
+  // CHECK: call { <4 x i32>, i32 } @llvm.s390.vftcisb(<4 x float> %{{.*}}, i32 4095)
+  // CHECK-ASM: vftcisb
+  vbl = vec_fp_test_data_class(vd, 0, &cc);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 0)
+  // CHECK-ASM: vftcidb
+  vbl = vec_fp_test_data_class(vd, 4095, &cc);
+  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
+  // CHECK-ASM: vftcidb
+}

diff --git a/clang/test/CodeGen/builtins-systemz-zvector3-constrained.c b/clang/test/CodeGen/builtins-systemz-zvector3-constrained.c
new file mode 100644
index 000000000000..b599470c19a2
--- /dev/null
+++ b/clang/test/CodeGen/builtins-systemz-zvector3-constrained.c
@@ -0,0 +1,109 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z15 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -flax-vector-conversions=none \
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -target-cpu z15 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -flax-vector-conversions=none \
+// RUN: -ffp-exception-behavior=strict \
+// RUN: -Wall -Wno-unused -Werror -S %s -o - | FileCheck %s --check-prefix=CHECK-ASM
+
+#include <vecintrin.h>
+
+volatile vector signed int vsi;
+volatile vector signed long long vsl;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector float vf;
+volatile vector double vd;
+
+volatile float f;
+volatile double d;
+
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile int idx;
+
+void test_core(void) {
+  // CHECK-ASM-LABEL: test_core
+  vector float vf2;
+  vector double vd2;
+
+  vf += vec_revb(vec_xl(idx, cptrf));
+  // CHECK-ASM: vlbrf
+  vd += vec_revb(vec_xl(idx, cptrd));
+  // CHECK-ASM: vlbrg
+
+  vec_xst(vec_revb(vf), idx, ptrf);
+  // CHECK-ASM: vstbrf
+  vec_xst(vec_revb(vd), idx, ptrd);
+  // CHECK-ASM: vstbrg
+
+  vf += vec_revb(vec_insert_and_zero(cptrf));
+  // CHECK-ASM: vllebrzf
+  vd += vec_revb(vec_insert_and_zero(cptrd));
+  // CHECK-ASM: vllebrzg
+
+  vf += vec_revb(vec_splats(f));
+  // CHECK-ASM: vlbrrepf
+  vd += vec_revb(vec_splats(d));
+  // CHECK-ASM: vlbrrepg
+
+  vf2 = vf;
+  vf += vec_revb(vec_insert(f, vec_revb(vf2), 0));
+  // CHECK-ASM: vlebrf
+  vd2 = vd;
+  vd += vec_revb(vec_insert(d, vec_revb(vd2), 0));
+  // CHECK-ASM: vlebrg
+
+  f = vec_extract(vec_revb(vf), 0);
+  // CHECK-ASM: vstebrf
+  d = vec_extract(vec_revb(vd), 0);
+  // CHECK-ASM: vstebrg
+
+  vf += vec_reve(vec_xl(idx, cptrf));
+  // CHECK-ASM: vlerf
+  vd += vec_reve(vec_xl(idx, cptrd));
+  // CHECK-ASM: vlerg
+
+  vec_xst(vec_reve(vf), idx, ptrf);
+  // CHECK-ASM: vsterf
+  vec_xst(vec_reve(vd), idx, ptrd);
+  // CHECK-ASM: vsterg
+}
+
+void test_float(void) {
+  // CHECK-ASM-LABEL: test_float
+
+  vd = vec_double(vsl);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcdgb
+  vd = vec_double(vul);
+  // CHECK: call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcdlgb
+  vf = vec_float(vsi);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcefb
+  vf = vec_float(vui);
+  // CHECK: call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcelfb
+
+  vsl = vec_signed(vd);
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcgdb
+  vsi = vec_signed(vf);
+  // CHECK: call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vcfeb
+  vul = vec_unsigned(vd);
+  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vclgdb
+  vui = vec_unsigned(vf);
+  // xHECK: fptoui <4 x float> %{{.*}} to <4 x i32>
+  // CHECK: call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vclfeb
+}
+

