[clang] 4ea8e27 - [SveEmitter] Add builtins to insert/extract subvectors from tuples (svget/svset)

Sander de Smalen via cfe-commits <cfe-commits@lists.llvm.org>
Thu Jun 18 03:06:55 PDT 2020


Author: Sander de Smalen
Date: 2020-06-18T11:06:16+01:00
New Revision: 4ea8e27a642c6f97ca69cd39bbe44f7366870f6c

URL: https://github.com/llvm/llvm-project/commit/4ea8e27a642c6f97ca69cd39bbe44f7366870f6c
DIFF: https://github.com/llvm/llvm-project/commit/4ea8e27a642c6f97ca69cd39bbe44f7366870f6c.diff

LOG: [SveEmitter] Add builtins to insert/extract subvectors from tuples (svget/svset)

For example:

  svint32_t svget4(svint32x4_t tuple, uint64_t imm_index)

returns the subvector of `tuple` at `imm_index`, which must be in the
range `0..3`, and

  svint32x3_t svset3(svint32x3_t tuple, uint64_t imm_index, svint32_t vec)

returns a tuple vector with `vec` inserted into `tuple` at `imm_index`,
which must be in the range `0..2`.
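
For illustration only (not part of this patch; the function name is
hypothetical), a minimal C sketch combining the two builtins via the
non-overloaded _s32 forms generated by this patch. The index argument
must be an integer constant expression, as enforced by the new Sema
checks:

  #include <arm_sve.h>

  // Copy the subvector at index 1 of a four-element tuple into index 0.
  svint32x4_t copy_lane1_to_lane0(svint32x4_t t) {
    svint32_t v = svget4_s32(t, 1); // extract subvector 1 (index 0..3)
    return svset4_s32(t, 0, v);     // return t with subvector 0 replaced by v
  }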

Reviewers: c-rhodes, efriedma

Reviewed By: c-rhodes

Tags: #clang

Differential Revision: https://reviews.llvm.org/D81464

Added: 
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get2.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get3.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get4.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set2.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set3.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set4.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get2.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get3.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get4.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set2.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set3.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set4.c

Modified: 
    clang/include/clang/Basic/TargetBuiltins.h
    clang/include/clang/Basic/arm_sve.td
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/lib/Sema/SemaChecking.cpp

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h
index 38d82d1d869f..eba055c302a7 100644
--- a/clang/include/clang/Basic/TargetBuiltins.h
+++ b/clang/include/clang/Basic/TargetBuiltins.h
@@ -248,6 +248,8 @@ namespace clang {
     bool isReverseUSDOT() const { return Flags & ReverseUSDOT; }
     bool isUndef() const { return Flags & IsUndef; }
     bool isTupleCreate() const { return Flags & IsTupleCreate; }
+    bool isTupleGet() const { return Flags & IsTupleGet; }
+    bool isTupleSet() const { return Flags & IsTupleSet; }
 
     uint64_t getBits() const { return Flags; }
     bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }

diff  --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 0348a3754e22..7c8eb8d38f75 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -201,6 +201,8 @@ def ReverseCompare            : FlagType<0x20000000>; // Compare operands must b
 def ReverseUSDOT              : FlagType<0x40000000>; // Unsigned/signed operands must be swapped.
 def IsUndef                   : FlagType<0x80000000>; // Codegen `undef` of given type.
 def IsTupleCreate             : FlagType<0x100000000>;
+def IsTupleGet                : FlagType<0x200000000>;
+def IsTupleSet                : FlagType<0x400000000>;
 
 // These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
 class ImmCheckType<int val> {
@@ -219,6 +221,9 @@ def ImmCheckLaneIndexDot        : ImmCheckType<9>;  // 0..(128/(4*sizeinbits(elt
 def ImmCheckComplexRot90_270    : ImmCheckType<10>; // [90,270]
 def ImmCheckComplexRotAll90     : ImmCheckType<11>; // [0, 90, 180,270]
 def ImmCheck0_13                : ImmCheckType<12>; // 0..13
+def ImmCheck0_1                 : ImmCheckType<13>; // 0..1
+def ImmCheck0_2                 : ImmCheckType<14>; // 0..2
+def ImmCheck0_3                 : ImmCheckType<15>; // 0..3
 
 class ImmCheck<int arg, ImmCheckType kind, int eltSizeArg = -1> {
   int Arg = arg;
@@ -1284,6 +1289,17 @@ def SVCREATE_2 : SInst<"svcreate2[_{d}]", "2dd",   "csilUcUsUiUlhfd", MergeNone,
 def SVCREATE_3 : SInst<"svcreate3[_{d}]", "3ddd",  "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_create3", [IsTupleCreate]>;
 def SVCREATE_4 : SInst<"svcreate4[_{d}]", "4dddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_create4", [IsTupleCreate]>;
 
+
+////////////////////////////////////////////////////////////////////////////////
+// Vector insertion and extraction
+def SVGET_2 : SInst<"svget2[_{d}]", "d2i", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_1>]>;
+def SVGET_3 : SInst<"svget3[_{d}]", "d3i", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_2>]>;
+def SVGET_4 : SInst<"svget4[_{d}]", "d4i", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_3>]>;
+
+def SVSET_2 : SInst<"svset2[_{d}]", "22id", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_1>]>;
+def SVSET_3 : SInst<"svset3[_{d}]", "33id", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_2>]>;
+def SVSET_4 : SInst<"svset4[_{d}]", "44id", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_3>]>;
+
 ////////////////////////////////////////////////////////////////////////////////
 // SVE2 WhileGE/GT
 let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {

diff  --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 8f8481a71ffb..7a138c54fb36 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -8030,10 +8030,10 @@ SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
   if (TypeFlags.isOverloadWhileRW())
     return {getSVEPredType(TypeFlags), Ops[0]->getType()};
 
-  if (TypeFlags.isOverloadCvt())
+  if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
     return {Ops[0]->getType(), Ops.back()->getType()};
 
-  if (TypeFlags.isTupleCreate())
+  if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
     return {ResultType, Ops[0]->getType()};
 
   assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");

diff  --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index b847161611ce..821074d8b57f 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -2131,6 +2131,18 @@ bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
               diag::err_rotation_argument_to_cmla))
         HasError = true;
       break;
+    case SVETypeFlags::ImmCheck0_1:
+      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_2:
+      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_3:
+      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
+        HasError = true;
+      break;
     }
   }
 

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get2.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get2.c
new file mode 100644
index 000000000000..b05612bb7c60
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get2.c
@@ -0,0 +1,99 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svget2_s8(svint8x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_s8
+  // CHECK: %[[EXT:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.get.nxv16i8.nxv32i8(<vscale x 32 x i8> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 16 x i8> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_s8,,)(tuple, 0);
+}
+
+svint16_t test_svget2_s16(svint16x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_s16
+  // CHECK: %[[EXT:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tuple.get.nxv8i16.nxv16i16(<vscale x 16 x i16> %tuple, i32 1)
+  // CHECK-NEXT: ret <vscale x 8 x i16> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_s16,,)(tuple, 1);
+}
+
+svint32_t test_svget2_s32(svint32x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_s32
+  // CHECK: %[[EXT:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv4i32.nxv8i32(<vscale x 8 x i32> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 4 x i32> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_s32,,)(tuple, 0);
+}
+
+svint64_t test_svget2_s64(svint64x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_s64
+  // CHECK: %[[EXT:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tuple.get.nxv2i64.nxv4i64(<vscale x 4 x i64> %tuple, i32 1)
+  // CHECK-NEXT: ret <vscale x 2 x i64> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_s64,,)(tuple, 1);
+}
+
+svuint8_t test_svget2_u8(svuint8x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_u8
+  // CHECK: %[[EXT:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.get.nxv16i8.nxv32i8(<vscale x 32 x i8> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 16 x i8> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_u8,,)(tuple, 0);
+}
+
+svuint16_t test_svget2_u16(svuint16x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_u16
+  // CHECK: %[[EXT:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tuple.get.nxv8i16.nxv16i16(<vscale x 16 x i16> %tuple, i32 1)
+  // CHECK-NEXT: ret <vscale x 8 x i16> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_u16,,)(tuple, 1);
+}
+
+svuint32_t test_svget2_u32(svuint32x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_u32
+  // CHECK: %[[EXT:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv4i32.nxv8i32(<vscale x 8 x i32> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 4 x i32> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_u32,,)(tuple, 0);
+}
+
+svuint64_t test_svget2_u64(svuint64x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_u64
+  // CHECK: %[[EXT:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tuple.get.nxv2i64.nxv4i64(<vscale x 4 x i64> %tuple, i32 1)
+  // CHECK-NEXT: ret <vscale x 2 x i64> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_u64,,)(tuple, 1);
+}
+
+svfloat16_t test_svget2_f16(svfloat16x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_f16
+  // CHECK: %[[EXT:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.tuple.get.nxv8f16.nxv16f16(<vscale x 16 x half> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 8 x half> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_f16,,)(tuple, 0);
+}
+
+svfloat32_t test_svget2_f32(svfloat32x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_f32
+  // CHECK: %[[EXT:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.tuple.get.nxv4f32.nxv8f32(<vscale x 8 x float> %tuple, i32 1)
+  // CHECK-NEXT: ret <vscale x 4 x float> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_f32,,)(tuple, 1);
+}
+
+svfloat64_t test_svget2_f64(svfloat64x2_t tuple)
+{
+  // CHECK-LABEL: test_svget2_f64
+  // CHECK: %[[EXT:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64(<vscale x 4 x double> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 2 x double> %[[EXT]]
+  return SVE_ACLE_FUNC(svget2,_f64,,)(tuple, 0);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get3.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get3.c
new file mode 100644
index 000000000000..4b788919055f
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get3.c
@@ -0,0 +1,99 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svget3_s8(svint8x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_s8
+  // CHECK: %[[EXT:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.get.nxv16i8.nxv48i8(<vscale x 48 x i8> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 16 x i8> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_s8,,)(tuple, 0);
+}
+
+svint16_t test_svget3_s16(svint16x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_s16
+  // CHECK: %[[EXT:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tuple.get.nxv8i16.nxv24i16(<vscale x 24 x i16> %tuple, i32 2)
+  // CHECK-NEXT: ret <vscale x 8 x i16> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_s16,,)(tuple, 2);
+}
+
+svint32_t test_svget3_s32(svint32x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_s32
+  // CHECK: %[[EXT:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv4i32.nxv12i32(<vscale x 12 x i32> %tuple, i32 1)
+  // CHECK-NEXT: ret <vscale x 4 x i32> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_s32,,)(tuple, 1);
+}
+
+svint64_t test_svget3_s64(svint64x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_s64
+  // CHECK: %[[EXT:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tuple.get.nxv2i64.nxv6i64(<vscale x 6 x i64> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 2 x i64> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_s64,,)(tuple, 0);
+}
+
+svuint8_t test_svget3_u8(svuint8x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_u8
+  // CHECK: %[[EXT:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.get.nxv16i8.nxv48i8(<vscale x 48 x i8> %tuple, i32 2)
+  // CHECK-NEXT: ret <vscale x 16 x i8> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_u8,,)(tuple, 2);
+}
+
+svuint16_t test_svget3_u16(svuint16x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_u16
+  // CHECK: %[[EXT:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tuple.get.nxv8i16.nxv24i16(<vscale x 24 x i16> %tuple, i32 1)
+  // CHECK-NEXT: ret <vscale x 8 x i16> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_u16,,)(tuple, 1);
+}
+
+svuint32_t test_svget3_u32(svuint32x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_u32
+  // CHECK: %[[EXT:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv4i32.nxv12i32(<vscale x 12 x i32> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 4 x i32> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_u32,,)(tuple, 0);
+}
+
+svuint64_t test_svget3_u64(svuint64x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_u64
+  // CHECK: %[[EXT:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tuple.get.nxv2i64.nxv6i64(<vscale x 6 x i64> %tuple, i32 2)
+  // CHECK-NEXT: ret <vscale x 2 x i64> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_u64,,)(tuple, 2);
+}
+
+svfloat16_t test_svget3_f16(svfloat16x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_f16
+  // CHECK: %[[EXT:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.tuple.get.nxv8f16.nxv24f16(<vscale x 24 x half> %tuple, i32 1)
+  // CHECK-NEXT: ret <vscale x 8 x half> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_f16,,)(tuple, 1);
+}
+
+svfloat32_t test_svget3_f32(svfloat32x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_f32
+  // CHECK: %[[EXT:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.tuple.get.nxv4f32.nxv12f32(<vscale x 12 x float> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 4 x float> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_f32,,)(tuple, 0);
+}
+
+svfloat64_t test_svget3_f64(svfloat64x3_t tuple)
+{
+  // CHECK-LABEL: test_svget3_f64
+  // CHECK: %[[EXT:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv6f64(<vscale x 6 x double> %tuple, i32 2)
+  // CHECK-NEXT: ret <vscale x 2 x double> %[[EXT]]
+  return SVE_ACLE_FUNC(svget3,_f64,,)(tuple, 2);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get4.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get4.c
new file mode 100644
index 000000000000..c4ce575eac42
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_get4.c
@@ -0,0 +1,101 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+// NOTE: For these tests clang converts the struct parameter into
+// several parameters, one for each member of the original struct.
+svint8_t test_svget4_s8(svint8x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_s8
+  // CHECK: %[[EXT:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.get.nxv16i8.nxv64i8(<vscale x 64 x i8> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 16 x i8> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_s8,,)(tuple, 0);
+}
+
+svint16_t test_svget4_s16(svint16x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_s16
+  // CHECK: %[[EXT:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tuple.get.nxv8i16.nxv32i16(<vscale x 32 x i16> %tuple, i32 2)
+  // CHECK-NEXT: ret <vscale x 8 x i16> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_s16,,)(tuple, 2);
+}
+
+svint32_t test_svget4_s32(svint32x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_s32
+  // CHECK: %[[EXT:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv4i32.nxv16i32(<vscale x 16 x i32> %tuple, i32 2)
+  // CHECK-NEXT: ret <vscale x 4 x i32> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_s32,,)(tuple, 2);
+}
+
+svint64_t test_svget4_s64(svint64x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_s64
+  // CHECK: %[[EXT:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tuple.get.nxv2i64.nxv8i64(<vscale x 8 x i64> %tuple, i32 3)
+  // CHECK-NEXT: ret <vscale x 2 x i64> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_s64,,)(tuple, 3);
+}
+
+svuint8_t test_svget4_u8(svuint8x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_u8
+  // CHECK: %[[EXT:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.get.nxv16i8.nxv64i8(<vscale x 64 x i8> %tuple, i32 2)
+  // CHECK-NEXT: ret <vscale x 16 x i8> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_u8,,)(tuple, 2);
+}
+
+svuint16_t test_svget4_u16(svuint16x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_u16
+  // CHECK: %[[EXT:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tuple.get.nxv8i16.nxv32i16(<vscale x 32 x i16> %tuple, i32 3)
+  // CHECK-NEXT: ret <vscale x 8 x i16> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_u16,,)(tuple, 3);
+}
+
+svuint32_t test_svget4_u32(svuint32x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_u32
+  // CHECK: %[[EXT:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv4i32.nxv16i32(<vscale x 16 x i32> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 4 x i32> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_u32,,)(tuple, 0);
+}
+
+svuint64_t test_svget4_u64(svuint64x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_u64
+  // CHECK: %[[EXT:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tuple.get.nxv2i64.nxv8i64(<vscale x 8 x i64> %tuple, i32 3)
+  // CHECK-NEXT: ret <vscale x 2 x i64> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_u64,,)(tuple, 3);
+}
+
+svfloat16_t test_svget4_f16(svfloat16x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_f16
+  // CHECK: %[[EXT:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.tuple.get.nxv8f16.nxv32f16(<vscale x 32 x half> %tuple, i32 2)
+  // CHECK-NEXT: ret <vscale x 8 x half> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_f16,,)(tuple, 2);
+}
+
+svfloat32_t test_svget4_f32(svfloat32x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_f32
+  // CHECK: %[[EXT:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.tuple.get.nxv4f32.nxv16f32(<vscale x 16 x float> %tuple, i32 0)
+  // CHECK-NEXT: ret <vscale x 4 x float> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_f32,,)(tuple, 0);
+}
+
+svfloat64_t test_svget4_f64(svfloat64x4_t tuple)
+{
+  // CHECK-LABEL: test_svget4_f64
+  // CHECK: %[[EXT:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv8f64(<vscale x 8 x double> %tuple, i32 2)
+  // CHECK-NEXT: ret <vscale x 2 x double> %[[EXT]]
+  return SVE_ACLE_FUNC(svget4,_f64,,)(tuple, 2);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set2.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set2.c
new file mode 100644
index 000000000000..dc41543a4e83
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set2.c
@@ -0,0 +1,99 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8x2_t test_svset2_s8(svint8x2_t tuple, svint8_t x)
+{
+  // CHECK-LABEL: test_svset2_s8
+  // CHECK: %[[INSERT:.*]] = call <vscale x 32 x i8> @llvm.aarch64.sve.tuple.set.nxv32i8.nxv16i8(<vscale x 32 x i8> %tuple, i32 1, <vscale x 16 x i8> %x)
+  // CHECK-NEXT: ret <vscale x 32 x i8> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_s8,,)(tuple, 1, x);
+}
+
+svint16x2_t test_svset2_s16(svint16x2_t tuple, svint16_t x)
+{
+  // CHECK-LABEL: test_svset2_s16
+  // CHECK: %[[INSERT:.*]] = call <vscale x 16 x i16> @llvm.aarch64.sve.tuple.set.nxv16i16.nxv8i16(<vscale x 16 x i16> %tuple, i32 0, <vscale x 8 x i16> %x)
+  // CHECK-NEXT: ret <vscale x 16 x i16> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_s16,,)(tuple, 0, x);
+}
+
+svint32x2_t test_svset2_s32(svint32x2_t tuple, svint32_t x)
+{
+  // CHECK-LABEL: test_svset2_s32
+  // CHECK: %[[INSERT:.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.tuple.set.nxv8i32.nxv4i32(<vscale x 8 x i32> %tuple, i32 1, <vscale x 4 x i32> %x)
+  // CHECK-NEXT: ret <vscale x 8 x i32> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_s32,,)(tuple, 1, x);
+}
+
+svint64x2_t test_svset2_s64(svint64x2_t tuple, svint64_t x)
+{
+  // CHECK-LABEL: test_svset2_s64
+  // CHECK: %[[INSERT:.*]] = call <vscale x 4 x i64> @llvm.aarch64.sve.tuple.set.nxv4i64.nxv2i64(<vscale x 4 x i64> %tuple, i32 0, <vscale x 2 x i64> %x)
+  // CHECK-NEXT: ret <vscale x 4 x i64> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_s64,,)(tuple, 0, x);
+}
+
+svuint8x2_t test_svset2_u8(svuint8x2_t tuple, svuint8_t x)
+{
+  // CHECK-LABEL: test_svset2_u8
+  // CHECK: %[[INSERT:.*]] = call <vscale x 32 x i8> @llvm.aarch64.sve.tuple.set.nxv32i8.nxv16i8(<vscale x 32 x i8> %tuple, i32 1, <vscale x 16 x i8> %x)
+  // CHECK-NEXT: ret <vscale x 32 x i8> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_u8,,)(tuple, 1, x);
+}
+
+svuint16x2_t test_svset2_u16(svuint16x2_t tuple, svuint16_t x)
+{
+  // CHECK-LABEL: test_svset2_u16
+  // CHECK: %[[INSERT:.*]] = call <vscale x 16 x i16> @llvm.aarch64.sve.tuple.set.nxv16i16.nxv8i16(<vscale x 16 x i16> %tuple, i32 0, <vscale x 8 x i16> %x)
+  // CHECK-NEXT: ret <vscale x 16 x i16> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_u16,,)(tuple, 0, x);
+}
+
+svuint32x2_t test_svset2_u32(svuint32x2_t tuple, svuint32_t x)
+{
+  // CHECK-LABEL: test_svset2_u32
+  // CHECK: %[[INSERT:.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.tuple.set.nxv8i32.nxv4i32(<vscale x 8 x i32> %tuple, i32 1, <vscale x 4 x i32> %x)
+  // CHECK-NEXT: ret <vscale x 8 x i32> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_u32,,)(tuple, 1, x);
+}
+
+svuint64x2_t test_svset2_u64(svuint64x2_t tuple, svuint64_t x)
+{
+  // CHECK-LABEL: test_svset2_u64
+  // CHECK: %[[INSERT:.*]] = call <vscale x 4 x i64> @llvm.aarch64.sve.tuple.set.nxv4i64.nxv2i64(<vscale x 4 x i64> %tuple, i32 0, <vscale x 2 x i64> %x)
+  // CHECK-NEXT: ret <vscale x 4 x i64> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_u64,,)(tuple, 0, x);
+}
+
+svfloat16x2_t test_svset2_f16(svfloat16x2_t tuple, svfloat16_t x)
+{
+  // CHECK-LABEL: test_svset2_f16
+  // CHECK: %[[INSERT:.*]] = call <vscale x 16 x half> @llvm.aarch64.sve.tuple.set.nxv16f16.nxv8f16(<vscale x 16 x half> %tuple, i32 1, <vscale x 8 x half> %x)
+  // CHECK-NEXT: ret <vscale x 16 x half> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_f16,,)(tuple, 1, x);
+}
+
+svfloat32x2_t test_svset2_f32(svfloat32x2_t tuple, svfloat32_t x)
+{
+  // CHECK-LABEL: test_svset2_f32
+  // CHECK: %[[INSERT:.*]] = call <vscale x 8 x float> @llvm.aarch64.sve.tuple.set.nxv8f32.nxv4f32(<vscale x 8 x float> %tuple, i32 0, <vscale x 4 x float> %x)
+  // CHECK-NEXT: ret <vscale x 8 x float> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_f32,,)(tuple, 0, x);
+}
+
+svfloat64x2_t test_svset2_f64(svfloat64x2_t tuple, svfloat64_t x)
+{
+  // CHECK-LABEL: test_svset2_f64
+  // CHECK: %[[INSERT:.*]] = call <vscale x 4 x double> @llvm.aarch64.sve.tuple.set.nxv4f64.nxv2f64(<vscale x 4 x double> %tuple, i32 1, <vscale x 2 x double> %x)
+  // CHECK-NEXT: ret <vscale x 4 x double> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset2,_f64,,)(tuple, 1, x);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set3.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set3.c
new file mode 100644
index 000000000000..77fd69bbbbc2
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set3.c
@@ -0,0 +1,102 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+
+// NOTE: For these tests clang converts the struct parameter into
+// several parameters, one for each member of the original struct.
+svint8x3_t test_svset3_s8(svint8x3_t tuple, svint8_t x)
+{
+  // CHECK-LABEL: test_svset3_s8
+  // CHECK: %[[INSERT:.*]] = call <vscale x 48 x i8> @llvm.aarch64.sve.tuple.set.nxv48i8.nxv16i8(<vscale x 48 x i8> %tuple, i32 1, <vscale x 16 x i8> %x)
+  // CHECK-NEXT: ret <vscale x 48 x i8> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_s8,,)(tuple, 1, x);
+}
+
+svint16x3_t test_svset3_s16(svint16x3_t tuple, svint16_t x)
+{
+  // CHECK-LABEL: test_svset3_s16
+  // CHECK: %[[INSERT:.*]] = call <vscale x 24 x i16> @llvm.aarch64.sve.tuple.set.nxv24i16.nxv8i16(<vscale x 24 x i16> %tuple, i32 2, <vscale x 8 x i16> %x)
+  // CHECK-NEXT: ret <vscale x 24 x i16> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_s16,,)(tuple, 2, x);
+}
+
+svint32x3_t test_svset3_s32(svint32x3_t tuple, svint32_t x)
+{
+  // CHECK-LABEL: test_svset3_s32
+  // CHECK: %[[INSERT:.*]] = call <vscale x 12 x i32> @llvm.aarch64.sve.tuple.set.nxv12i32.nxv4i32(<vscale x 12 x i32> %tuple, i32 0, <vscale x 4 x i32> %x)
+  // CHECK-NEXT: ret <vscale x 12 x i32> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_s32,,)(tuple, 0, x);
+}
+
+svint64x3_t test_svset3_s64(svint64x3_t tuple, svint64_t x)
+{
+  // CHECK-LABEL: test_svset3_s64
+  // CHECK: %[[INSERT:.*]] = call <vscale x 6 x i64> @llvm.aarch64.sve.tuple.set.nxv6i64.nxv2i64(<vscale x 6 x i64> %tuple, i32 1, <vscale x 2 x i64> %x)
+  // CHECK-NEXT: ret <vscale x 6 x i64> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_s64,,)(tuple, 1, x);
+}
+
+svuint8x3_t test_svset3_u8(svuint8x3_t tuple, svuint8_t x)
+{
+  // CHECK-LABEL: test_svset3_u8
+  // CHECK: %[[INSERT:.*]] = call <vscale x 48 x i8> @llvm.aarch64.sve.tuple.set.nxv48i8.nxv16i8(<vscale x 48 x i8> %tuple, i32 2, <vscale x 16 x i8> %x)
+  // CHECK-NEXT: ret <vscale x 48 x i8> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_u8,,)(tuple, 2, x);
+}
+
+svuint16x3_t test_svset3_u16(svuint16x3_t tuple, svuint16_t x)
+{
+  // CHECK-LABEL: test_svset3_u16
+  // CHECK: %[[INSERT:.*]] = call <vscale x 24 x i16> @llvm.aarch64.sve.tuple.set.nxv24i16.nxv8i16(<vscale x 24 x i16> %tuple, i32 0, <vscale x 8 x i16> %x)
+  // CHECK-NEXT: ret <vscale x 24 x i16> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_u16,,)(tuple, 0, x);
+}
+
+svuint32x3_t test_svset3_u32(svuint32x3_t tuple, svuint32_t x)
+{
+  // CHECK-LABEL: test_svset3_u32
+  // CHECK: %[[INSERT:.*]] = call <vscale x 12 x i32> @llvm.aarch64.sve.tuple.set.nxv12i32.nxv4i32(<vscale x 12 x i32> %tuple, i32 1, <vscale x 4 x i32> %x)
+  // CHECK-NEXT: ret <vscale x 12 x i32> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_u32,,)(tuple, 1, x);
+}
+
+svuint64x3_t test_svset3_u64(svuint64x3_t tuple, svuint64_t x)
+{
+  // CHECK-LABEL: test_svset3_u64
+  // CHECK: %[[INSERT:.*]] = call <vscale x 6 x i64> @llvm.aarch64.sve.tuple.set.nxv6i64.nxv2i64(<vscale x 6 x i64> %tuple, i32 2, <vscale x 2 x i64> %x)
+  // CHECK-NEXT: ret <vscale x 6 x i64> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_u64,,)(tuple, 2, x);
+}
+
+svfloat16x3_t test_svset3_f16(svfloat16x3_t tuple, svfloat16_t x)
+{
+  // CHECK-LABEL: test_svset3_f16
+  // CHECK: %[[INSERT:.*]] = call <vscale x 24 x half> @llvm.aarch64.sve.tuple.set.nxv24f16.nxv8f16(<vscale x 24 x half> %tuple, i32 0, <vscale x 8 x half> %x)
+  // CHECK-NEXT: ret <vscale x 24 x half> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_f16,,)(tuple, 0, x);
+}
+
+svfloat32x3_t test_svset3_f32(svfloat32x3_t tuple, svfloat32_t x)
+{
+  // CHECK-LABEL: test_svset3_f32
+  // CHECK: %[[INSERT:.*]] = call <vscale x 12 x float> @llvm.aarch64.sve.tuple.set.nxv12f32.nxv4f32(<vscale x 12 x float> %tuple, i32 1, <vscale x 4 x float> %x)
+  // CHECK-NEXT: ret <vscale x 12 x float> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_f32,,)(tuple, 1, x);
+}
+
+svfloat64x3_t test_svset3_f64(svfloat64x3_t tuple, svfloat64_t x)
+{
+  // CHECK-LABEL: test_svset3_f64
+  // CHECK: %[[INSERT:.*]] = call <vscale x 6 x double> @llvm.aarch64.sve.tuple.set.nxv6f64.nxv2f64(<vscale x 6 x double> %tuple, i32 2, <vscale x 2 x double> %x)
+  // CHECK-NEXT: ret <vscale x 6 x double> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset3,_f64,,)(tuple, 2, x);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set4.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set4.c
new file mode 100644
index 000000000000..6522948d8ba6
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_set4.c
@@ -0,0 +1,100 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+
+svint8x4_t test_svset4_s8(svint8x4_t tuple, svint8_t x)
+{
+  // CHECK-LABEL: test_svset4_s8
+  // CHECK: %[[INSERT:.*]] = call <vscale x 64 x i8> @llvm.aarch64.sve.tuple.set.nxv64i8.nxv16i8(<vscale x 64 x i8> %tuple, i32 1, <vscale x 16 x i8> %x)
+  // CHECK-NEXT: ret <vscale x 64 x i8> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_s8,,)(tuple, 1, x);
+}
+
+svint16x4_t test_svset4_s16(svint16x4_t tuple, svint16_t x)
+{
+  // CHECK-LABEL: test_svset4_s16
+  // CHECK: %[[INSERT:.*]] = call <vscale x 32 x i16> @llvm.aarch64.sve.tuple.set.nxv32i16.nxv8i16(<vscale x 32 x i16> %tuple, i32 3, <vscale x 8 x i16> %x)
+  // CHECK-NEXT: ret <vscale x 32 x i16> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_s16,,)(tuple, 3, x);
+}
+
+svint32x4_t test_svset4_s32(svint32x4_t tuple, svint32_t x)
+{
+  // CHECK-LABEL: test_svset4_s32
+  // CHECK: %[[INSERT:.*]] = call <vscale x 16 x i32> @llvm.aarch64.sve.tuple.set.nxv16i32.nxv4i32(<vscale x 16 x i32> %tuple, i32 1, <vscale x 4 x i32> %x)
+  // CHECK-NEXT: ret <vscale x 16 x i32> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_s32,,)(tuple, 1, x);
+}
+
+svint64x4_t test_svset4_s64(svint64x4_t tuple, svint64_t x)
+{
+  // CHECK-LABEL: test_svset4_s64
+  // CHECK: %[[INSERT:.*]] = call <vscale x 8 x i64> @llvm.aarch64.sve.tuple.set.nxv8i64.nxv2i64(<vscale x 8 x i64> %tuple, i32 1, <vscale x 2 x i64> %x)
+  // CHECK-NEXT: ret <vscale x 8 x i64> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_s64,,)(tuple, 1, x);
+}
+
+svuint8x4_t test_svset4_u8(svuint8x4_t tuple, svuint8_t x)
+{
+  // CHECK-LABEL: test_svset4_u8
+  // CHECK: %[[INSERT:.*]] = call <vscale x 64 x i8> @llvm.aarch64.sve.tuple.set.nxv64i8.nxv16i8(<vscale x 64 x i8> %tuple, i32 3, <vscale x 16 x i8> %x)
+  // CHECK-NEXT: ret <vscale x 64 x i8> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_u8,,)(tuple, 3, x);
+}
+
+svuint16x4_t test_svset4_u16(svuint16x4_t tuple, svuint16_t x)
+{
+  // CHECK-LABEL: test_svset4_u16
+  // CHECK: %[[INSERT:.*]] = call <vscale x 32 x i16> @llvm.aarch64.sve.tuple.set.nxv32i16.nxv8i16(<vscale x 32 x i16> %tuple, i32 1, <vscale x 8 x i16> %x)
+  // CHECK-NEXT: ret <vscale x 32 x i16> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_u16,,)(tuple, 1, x);
+}
+
+svuint32x4_t test_svset4_u32(svuint32x4_t tuple, svuint32_t x)
+{
+  // CHECK-LABEL: test_svset4_u32
+  // CHECK: %[[INSERT:.*]] = call <vscale x 16 x i32> @llvm.aarch64.sve.tuple.set.nxv16i32.nxv4i32(<vscale x 16 x i32> %tuple, i32 1, <vscale x 4 x i32> %x)
+  // CHECK-NEXT: ret <vscale x 16 x i32> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_u32,,)(tuple, 1, x);
+}
+
+svuint64x4_t test_svset4_u64(svuint64x4_t tuple, svuint64_t x)
+{
+  // CHECK-LABEL: test_svset4_u64
+  // CHECK: %[[INSERT:.*]] = call <vscale x 8 x i64> @llvm.aarch64.sve.tuple.set.nxv8i64.nxv2i64(<vscale x 8 x i64> %tuple, i32 3, <vscale x 2 x i64> %x)
+  // CHECK-NEXT: ret <vscale x 8 x i64> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_u64,,)(tuple, 3, x);
+}
+
+svfloat16x4_t test_svset4_f16(svfloat16x4_t tuple, svfloat16_t x)
+{
+  // CHECK-LABEL: test_svset4_f16
+  // CHECK: %[[INSERT:.*]] = call <vscale x 32 x half> @llvm.aarch64.sve.tuple.set.nxv32f16.nxv8f16(<vscale x 32 x half> %tuple, i32 1, <vscale x 8 x half> %x)
+  // CHECK-NEXT: ret <vscale x 32 x half> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_f16,,)(tuple, 1, x);
+}
+
+svfloat32x4_t test_svset4_f32(svfloat32x4_t tuple, svfloat32_t x)
+{
+  // CHECK-LABEL: test_svset4_f32
+  // CHECK: %[[INSERT:.*]] = call <vscale x 16 x float> @llvm.aarch64.sve.tuple.set.nxv16f32.nxv4f32(<vscale x 16 x float> %tuple, i32 1, <vscale x 4 x float> %x)
+  // CHECK-NEXT: ret <vscale x 16 x float> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_f32,,)(tuple, 1, x);
+}
+
+svfloat64x4_t test_svset4_f64(svfloat64x4_t tuple, svfloat64_t x)
+{
+  // CHECK-LABEL: test_svset4_f64
+  // CHECK: %[[INSERT:.*]] = call <vscale x 8 x double> @llvm.aarch64.sve.tuple.set.nxv8f64.nxv2f64(<vscale x 8 x double> %tuple, i32 3, <vscale x 2 x double> %x)
+  // CHECK-NEXT: ret <vscale x 8 x double> %[[INSERT]]
+  return SVE_ACLE_FUNC(svset4,_f64,,)(tuple, 3, x);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get2.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get2.c
new file mode 100644
index 000000000000..7bd084490c2b
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get2.c
@@ -0,0 +1,143 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svget2_s8(svint8x2_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_s8,,)(tuple, -1);
+}
+
+svint16_t test_svget2_s16(svint16x2_t tuple)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_s16,,)(tuple, 2);
+}
+
+svint32_t test_svget2_s32(svint32x2_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_s32,,)(tuple, -1);
+}
+
+svint64_t test_svget2_s64(svint64x2_t tuple)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_s64,,)(tuple, 2);
+}
+
+svuint8_t test_svget2_u8(svuint8x2_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_u8,,)(tuple, -1);
+}
+
+svuint16_t test_svget2_u16(svuint16x2_t tuple)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_u16,,)(tuple, 2);
+}
+
+svuint32_t test_svget2_u32(svuint32x2_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_u32,,)(tuple, -1);
+}
+
+svuint64_t test_svget2_u64(svuint64x2_t tuple)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_u64,,)(tuple, 2);
+}
+
+svfloat16_t test_svget2_f16(svfloat16x2_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_f16,,)(tuple, -1);
+}
+
+svfloat32_t test_svget2_f32(svfloat32x2_t tuple)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_f32,,)(tuple, 2);
+}
+
+svfloat64_t test_svget2_f64(svfloat64x2_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svget2,_f64,,)(tuple, -1);
+}
+
+svint8_t test_svget2_s8_var(svint8x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_s8,,)(tuple, imm_index);
+}
+
+svint16_t test_svget2_s16_var(svint16x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_s16,,)(tuple, imm_index);
+}
+
+svint32_t test_svget2_s32_var(svint32x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_s32,,)(tuple, imm_index);
+}
+
+svint64_t test_svget2_s64_var(svint64x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_s64,,)(tuple, imm_index);
+}
+
+svuint8_t test_svget2_u8_var(svuint8x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_u8,,)(tuple, imm_index);
+}
+
+svuint16_t test_svget2_u16_var(svuint16x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_u16,,)(tuple, imm_index);
+}
+
+svuint32_t test_svget2_u32_var(svuint32x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_u32,,)(tuple, imm_index);
+}
+
+svuint64_t test_svget2_u64_var(svuint64x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_u64,,)(tuple, imm_index);
+}
+
+svfloat16_t test_svget2_f16_var(svfloat16x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_f16,,)(tuple, imm_index);
+}
+
+svfloat32_t test_svget2_f32_var(svfloat32x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_f32,,)(tuple, imm_index);
+}
+
+svfloat64_t test_svget2_f64_var(svfloat64x2_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget2,_f64,,)(tuple, imm_index);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get3.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get3.c
new file mode 100644
index 000000000000..91b91eb07a42
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get3.c
@@ -0,0 +1,143 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svget3_s8(svint8x3_t tuple)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_s8,,)(tuple, 3);
+}
+
+svint16_t test_svget3_s16(svint16x3_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_s16,,)(tuple, -1);
+}
+
+svint32_t test_svget3_s32(svint32x3_t tuple)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_s32,,)(tuple, 3);
+}
+
+svint64_t test_svget3_s64(svint64x3_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_s64,,)(tuple, -1);
+}
+
+svuint8_t test_svget3_u8(svuint8x3_t tuple)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_u8,,)(tuple, 3);
+}
+
+svuint16_t test_svget3_u16(svuint16x3_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_u16,,)(tuple, -1);
+}
+
+svuint32_t test_svget3_u32(svuint32x3_t tuple)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_u32,,)(tuple, 3);
+}
+
+svuint64_t test_svget3_u64(svuint64x3_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_u64,,)(tuple, -1);
+}
+
+svfloat16_t test_svget3_f16(svfloat16x3_t tuple)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_f16,,)(tuple, 3);
+}
+
+svfloat32_t test_svget3_f32(svfloat32x3_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_f32,,)(tuple, -1);
+}
+
+svfloat64_t test_svget3_f64(svfloat64x3_t tuple)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svget3,_f64,,)(tuple, 3);
+}
+
+svint8_t test_svget3_s8_var(svint8x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_s8,,)(tuple, imm_index);
+}
+
+svint16_t test_svget3_s16_var(svint16x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_s16,,)(tuple, imm_index);
+}
+
+svint32_t test_svget3_s32_var(svint32x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_s32,,)(tuple, imm_index);
+}
+
+svint64_t test_svget3_s64_var(svint64x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_s64,,)(tuple, imm_index);
+}
+
+svuint8_t test_svget3_u8_var(svuint8x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_u8,,)(tuple, imm_index);
+}
+
+svuint16_t test_svget3_u16_var(svuint16x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_u16,,)(tuple, imm_index);
+}
+
+svuint32_t test_svget3_u32_var(svuint32x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_u32,,)(tuple, imm_index);
+}
+
+svuint64_t test_svget3_u64_var(svuint64x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_u64,,)(tuple, imm_index);
+}
+
+svfloat16_t test_svget3_f16_var(svfloat16x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_f16,,)(tuple, imm_index);
+}
+
+svfloat32_t test_svget3_f32_var(svfloat32x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_f32,,)(tuple, imm_index);
+}
+
+svfloat64_t test_svget3_f64_var(svfloat64x3_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget3,_f64,,)(tuple, imm_index);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get4.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get4.c
new file mode 100644
index 000000000000..10e5175fd6ab
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_get4.c
@@ -0,0 +1,143 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svget4_s8(svint8x4_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_s8,,)(tuple, -1);
+}
+
+svint16_t test_svget4_s16(svint16x4_t tuple)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_s16,,)(tuple, 4);
+}
+
+svint32_t test_svget4_s32(svint32x4_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_s32,,)(tuple, -1);
+}
+
+svint64_t test_svget4_s64(svint64x4_t tuple)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_s64,,)(tuple, 4);
+}
+
+svuint8_t test_svget4_u8(svuint8x4_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_u8,,)(tuple, -1);
+}
+
+svuint16_t test_svget4_u16(svuint16x4_t tuple)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_u16,,)(tuple, 4);
+}
+
+svuint32_t test_svget4_u32(svuint32x4_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_u32,,)(tuple, -1);
+}
+
+svuint64_t test_svget4_u64(svuint64x4_t tuple)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_u64,,)(tuple, 4);
+}
+
+svfloat16_t test_svget4_f16(svfloat16x4_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_f16,,)(tuple, -1);
+}
+
+svfloat32_t test_svget4_f32(svfloat32x4_t tuple)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_f32,,)(tuple, 4);
+}
+
+svfloat64_t test_svget4_f64(svfloat64x4_t tuple)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svget4,_f64,,)(tuple, -1);
+}
+
+svint8_t test_svget4_s8_var(svint8x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_s8,,)(tuple, imm_index);
+}
+
+svint16_t test_svget4_s16_var(svint16x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_s16,,)(tuple, imm_index);
+}
+
+svint32_t test_svget4_s32_var(svint32x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_s32,,)(tuple, imm_index);
+}
+
+svint64_t test_svget4_s64_var(svint64x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_s64,,)(tuple, imm_index);
+}
+
+svuint8_t test_svget4_u8_var(svuint8x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_u8,,)(tuple, imm_index);
+}
+
+svuint16_t test_svget4_u16_var(svuint16x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_u16,,)(tuple, imm_index);
+}
+
+svuint32_t test_svget4_u32_var(svuint32x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_u32,,)(tuple, imm_index);
+}
+
+svuint64_t test_svget4_u64_var(svuint64x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_u64,,)(tuple, imm_index);
+}
+
+svfloat16_t test_svget4_f16_var(svfloat16x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_f16,,)(tuple, imm_index);
+}
+
+svfloat32_t test_svget4_f32_var(svfloat32x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_f32,,)(tuple, imm_index);
+}
+
+svfloat64_t test_svget4_f64_var(svfloat64x4_t tuple, uint64_t imm_index)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svget4,_f64,,)(tuple, imm_index);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set2.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set2.c
new file mode 100644
index 000000000000..621e0c89926c
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set2.c
@@ -0,0 +1,143 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8x2_t test_svset2_s8(svint8x2_t tuple, svint8_t x)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_s8,,)(tuple, 2, x);
+}
+
+svint16x2_t test_svset2_s16(svint16x2_t tuple, svint16_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_s16,,)(tuple, -1, x);
+}
+
+svint32x2_t test_svset2_s32(svint32x2_t tuple, svint32_t x)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_s32,,)(tuple, 2, x);
+}
+
+svint64x2_t test_svset2_s64(svint64x2_t tuple, svint64_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_s64,,)(tuple, -1, x);
+}
+
+svuint8x2_t test_svset2_u8(svuint8x2_t tuple, svuint8_t x)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_u8,,)(tuple, 2, x);
+}
+
+svuint16x2_t test_svset2_u16(svuint16x2_t tuple, svuint16_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_u16,,)(tuple, -1, x);
+}
+
+svuint32x2_t test_svset2_u32(svuint32x2_t tuple, svuint32_t x)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_u32,,)(tuple, 2, x);
+}
+
+svuint64x2_t test_svset2_u64(svuint64x2_t tuple, svuint64_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_u64,,)(tuple, -1, x);
+}
+
+svfloat16x2_t test_svset2_f16(svfloat16x2_t tuple, svfloat16_t x)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_f16,,)(tuple, 2, x);
+}
+
+svfloat32x2_t test_svset2_f32(svfloat32x2_t tuple, svfloat32_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_f32,,)(tuple, -1, x);
+}
+
+svfloat64x2_t test_svset2_f64(svfloat64x2_t tuple, svfloat64_t x)
+{
+  // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svset2,_f64,,)(tuple, 2, x);
+}
+
+svint8x2_t test_svset2_s8_var(svint8x2_t tuple, uint64_t imm_index, svint8_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_s8,,)(tuple, imm_index, x);
+}
+
+svint16x2_t test_svset2_s16_var(svint16x2_t tuple, uint64_t imm_index, svint16_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_s16,,)(tuple, imm_index, x);
+}
+
+svint32x2_t test_svset2_s32_var(svint32x2_t tuple, uint64_t imm_index, svint32_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_s32,,)(tuple, imm_index, x);
+}
+
+svint64x2_t test_svset2_s64_var(svint64x2_t tuple, uint64_t imm_index, svint64_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_s64,,)(tuple, imm_index, x);
+}
+
+svuint8x2_t test_svset2_u8_var(svuint8x2_t tuple, uint64_t imm_index, svuint8_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_u8,,)(tuple, imm_index, x);
+}
+
+svuint16x2_t test_svset2_u16_var(svuint16x2_t tuple, uint64_t imm_index, svuint16_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_u16,,)(tuple, imm_index, x);
+}
+
+svuint32x2_t test_svset2_u32_var(svuint32x2_t tuple, uint64_t imm_index, svuint32_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_u32,,)(tuple, imm_index, x);
+}
+
+svuint64x2_t test_svset2_u64_var(svuint64x2_t tuple, uint64_t imm_index, svuint64_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_u64,,)(tuple, imm_index, x);
+}
+
+svfloat16x2_t test_svset2_f16_var(svfloat16x2_t tuple, uint64_t imm_index, svfloat16_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_f16,,)(tuple, imm_index, x);
+}
+
+svfloat32x2_t test_svset2_f32_var(svfloat32x2_t tuple, uint64_t imm_index, svfloat32_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_f32,,)(tuple, imm_index, x);
+}
+
+svfloat64x2_t test_svset2_f64_var(svfloat64x2_t tuple, uint64_t imm_index, svfloat64_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset2,_f64,,)(tuple, imm_index, x);
+}

diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set3.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set3.c
new file mode 100644
index 000000000000..3b624588616b
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set3.c
@@ -0,0 +1,143 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8x3_t test_svset3_s8(svint8x3_t tuple, svint8_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_s8,,)(tuple, -1, x);
+}
+
+svint16x3_t test_svset3_s16(svint16x3_t tuple, svint16_t x)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_s16,,)(tuple, 3, x);
+}
+
+svint32x3_t test_svset3_s32(svint32x3_t tuple, svint32_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_s32,,)(tuple, -1, x);
+}
+
+svint64x3_t test_svset3_s64(svint64x3_t tuple, svint64_t x)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_s64,,)(tuple, 3, x);
+}
+
+svuint8x3_t test_svset3_u8(svuint8x3_t tuple, svuint8_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_u8,,)(tuple, -1, x);
+}
+
+svuint16x3_t test_svset3_u16(svuint16x3_t tuple, svuint16_t x)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_u16,,)(tuple, 3, x);
+}
+
+svuint32x3_t test_svset3_u32(svuint32x3_t tuple, svuint32_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_u32,,)(tuple, -1, x);
+}
+
+svuint64x3_t test_svset3_u64(svuint64x3_t tuple, svuint64_t x)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_u64,,)(tuple, 3, x);
+}
+
+svfloat16x3_t test_svset3_f16(svfloat16x3_t tuple, svfloat16_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_f16,,)(tuple, -1, x);
+}
+
+svfloat32x3_t test_svset3_f32(svfloat32x3_t tuple, svfloat32_t x)
+{
+  // expected-error@+1 {{argument value 3 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_f32,,)(tuple, 3, x);
+}
+
+svfloat64x3_t test_svset3_f64(svfloat64x3_t tuple, svfloat64_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 2]}}
+  return SVE_ACLE_FUNC(svset3,_f64,,)(tuple, -1, x);
+}
+
+svint8x3_t test_svset3_s8_var(svint8x3_t tuple, uint64_t imm_index, svint8_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_s8,,)(tuple, imm_index, x);
+}
+
+svint16x3_t test_svset3_s16_var(svint16x3_t tuple, uint64_t imm_index, svint16_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_s16,,)(tuple, imm_index, x);
+}
+
+svint32x3_t test_svset3_s32_var(svint32x3_t tuple, uint64_t imm_index, svint32_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_s32,,)(tuple, imm_index, x);
+}
+
+svint64x3_t test_svset3_s64_var(svint64x3_t tuple, uint64_t imm_index, svint64_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_s64,,)(tuple, imm_index, x);
+}
+
+svuint8x3_t test_svset3_u8_var(svuint8x3_t tuple, uint64_t imm_index, svuint8_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_u8,,)(tuple, imm_index, x);
+}
+
+svuint16x3_t test_svset3_u16_var(svuint16x3_t tuple, uint64_t imm_index, svuint16_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_u16,,)(tuple, imm_index, x);
+}
+
+svuint32x3_t test_svset3_u32_var(svuint32x3_t tuple, uint64_t imm_index, svuint32_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_u32,,)(tuple, imm_index, x);
+}
+
+svuint64x3_t test_svset3_u64_var(svuint64x3_t tuple, uint64_t imm_index, svuint64_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_u64,,)(tuple, imm_index, x);
+}
+
+svfloat16x3_t test_svset3_f16_var(svfloat16x3_t tuple, uint64_t imm_index, svfloat16_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_f16,,)(tuple, imm_index, x);
+}
+
+svfloat32x3_t test_svset3_f32_var(svfloat32x3_t tuple, uint64_t imm_index, svfloat32_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_f32,,)(tuple, imm_index, x);
+}
+
+svfloat64x3_t test_svset3_f64_var(svfloat64x3_t tuple, uint64_t imm_index, svfloat64_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset3,_f64,,)(tuple, imm_index, x);
+}

diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set4.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set4.c
new file mode 100644
index 000000000000..8cc19246d620
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_set4.c
@@ -0,0 +1,143 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8x4_t test_svset4_s8(svint8x4_t tuple, svint8_t x)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_s8,,)(tuple, 4, x);
+}
+
+svint16x4_t test_svset4_s16(svint16x4_t tuple, svint16_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_s16,,)(tuple, -1, x);
+}
+
+svint32x4_t test_svset4_s32(svint32x4_t tuple, svint32_t x)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_s32,,)(tuple, 4, x);
+}
+
+svint64x4_t test_svset4_s64(svint64x4_t tuple, svint64_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_s64,,)(tuple, -1, x);
+}
+
+svuint8x4_t test_svset4_u8(svuint8x4_t tuple, svuint8_t x)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_u8,,)(tuple, 4, x);
+}
+
+svuint16x4_t test_svset4_u16(svuint16x4_t tuple, svuint16_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_u16,,)(tuple, -1, x);
+}
+
+svuint32x4_t test_svset4_u32(svuint32x4_t tuple, svuint32_t x)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_u32,,)(tuple, 4, x);
+}
+
+svuint64x4_t test_svset4_u64(svuint64x4_t tuple, svuint64_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_u64,,)(tuple, -1, x);
+}
+
+svfloat16x4_t test_svset4_f16(svfloat16x4_t tuple, svfloat16_t x)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_f16,,)(tuple, 4, x);
+}
+
+svfloat32x4_t test_svset4_f32(svfloat32x4_t tuple, svfloat32_t x)
+{
+  // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_f32,,)(tuple, -1, x);
+}
+
+svfloat64x4_t test_svset4_f64(svfloat64x4_t tuple, svfloat64_t x)
+{
+  // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svset4,_f64,,)(tuple, 4, x);
+}
+
+svint8x4_t test_svset4_s8_var(svint8x4_t tuple, uint64_t imm_index, svint8_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_s8,,)(tuple, imm_index, x);
+}
+
+svint16x4_t test_svset4_s16_var(svint16x4_t tuple, uint64_t imm_index, svint16_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_s16,,)(tuple, imm_index, x);
+}
+
+svint32x4_t test_svset4_s32_var(svint32x4_t tuple, uint64_t imm_index, svint32_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_s32,,)(tuple, imm_index, x);
+}
+
+svint64x4_t test_svset4_s64_var(svint64x4_t tuple, uint64_t imm_index, svint64_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_s64,,)(tuple, imm_index, x);
+}
+
+svuint8x4_t test_svset4_u8_var(svuint8x4_t tuple, uint64_t imm_index, svuint8_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_u8,,)(tuple, imm_index, x);
+}
+
+svuint16x4_t test_svset4_u16_var(svuint16x4_t tuple, uint64_t imm_index, svuint16_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_u16,,)(tuple, imm_index, x);
+}
+
+svuint32x4_t test_svset4_u32_var(svuint32x4_t tuple, uint64_t imm_index, svuint32_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_u32,,)(tuple, imm_index, x);
+}
+
+svuint64x4_t test_svset4_u64_var(svuint64x4_t tuple, uint64_t imm_index, svuint64_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_u64,,)(tuple, imm_index, x);
+}
+
+svfloat16x4_t test_svset4_f16_var(svfloat16x4_t tuple, uint64_t imm_index, svfloat16_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_f16,,)(tuple, imm_index, x);
+}
+
+svfloat32x4_t test_svset4_f32_var(svfloat32x4_t tuple, uint64_t imm_index, svfloat32_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_f32,,)(tuple, imm_index, x);
+}
+
+svfloat64x4_t test_svset4_f64_var(svfloat64x4_t tuple, uint64_t imm_index, svfloat64_t x)
+{
+  // expected-error-re@+1 {{argument to '{{.*}}' must be a constant integer}}
+  return SVE_ACLE_FUNC(svset4,_f64,,)(tuple, imm_index, x);
+}
