[clang] [SVE2.1][Clang][LLVM] Int/FP reduce builtins in Clang and LLVM intrinsics (PR #69926)

via cfe-commits cfe-commits at lists.llvm.org
Mon Oct 23 06:26:35 PDT 2023


https://github.com/CarolineConcatto created https://github.com/llvm/llvm-project/pull/69926

This patch implements the Clang builtins
and the corresponding LLVM IR intrinsics for the following:

// Variants are also available for:
// _s8, _s16, _u16, _s32, _u32, _s64, _u64,
// _f16, _f32, _f64
uint8x16_t svaddqv[_u8](svbool_t pg, svuint8_t zn);

// Variants are also available for:
// _s8, _u16, _s16, _u32, _s32, _u64, _s64
uint8x16_t svandqv[_u8](svbool_t pg, svuint8_t zn);
uint8x16_t sveorqv[_u8](svbool_t pg, svuint8_t zn);
uint8x16_t svorqv[_u8](svbool_t pg, svuint8_t zn);

// Variants are also available for:
// _s8, _u16, _s16, _u32, _s32, _u64, _s64
uint8x16_t svmaxqv[_u8](svbool_t pg, svuint8_t zn);
uint8x16_t svminqv[_u8](svbool_t pg, svuint8_t zn);

// Variants are also available for _f32, _f64
float16x8_t svmaxnmqv[_f16](svbool_t pg, svfloat16_t zn);
float16x8_t svminnmqv[_f16](svbool_t pg, svfloat16_t zn);
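
For example, a minimal call site (mirroring the new Clang tests added by
this patch):

#include <arm_sve.h>

// Reduce the active lanes of a scalable uint8 vector down to a single
// 128-bit fixed-length result.
uint8x16_t reduce_add(svbool_t pg, svuint8_t zn) {
  return svaddqv_u8(pg, zn);
}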

As specified in ACLE PR #257 [1], the reduction instructions take scalable
vectors as input and produce fixed-length vectors as output; we therefore
change SVEEmitter to emit the fixed-length vector types when the NEON header
(arm_neon.h) is not present.
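
For illustration, this is the kind of guarded fixed-length typedef that
arm_sve.h then provides (a sketch only; the exact guard macro and
spelling are assumptions, not the patch's literal output):

// Hypothetical sketch of emitter output when arm_neon.h is absent:
#ifndef __ARM_NEON_H   // guard name is an assumption
typedef __attribute__((neon_vector_type(16))) uint8_t   uint8x16_t;
typedef __attribute__((neon_vector_type(8)))  float16_t float16x8_t;
typedef __attribute__((neon_vector_type(4)))  float32_t float32x4_t;
typedef __attribute__((neon_vector_type(2)))  float64_t float64x2_t;
#endif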

[1] https://github.com/ARM-software/acle/pull/257

Co-authored-by: Dinar Temirbulatov <dinar.temirbulatov at arm.com>
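
For reference, one plausible scalar model of what svaddqv computes under
the quadword-segment reduction semantics (illustration only; this is not
code from the patch, and pg/zn are modeled as plain arrays):

#include <stdbool.h>
#include <stdint.h>

// Model of svaddqv_u8: lane i of the 128-bit result is the sum of lane i
// of every 16-byte segment of zn, counting only lanes where pg is active.
// num_segments corresponds to vscale (vector length / 128 bits).
static void addqv_u8_model(const bool *pg, const uint8_t *zn,
                           int num_segments, uint8_t result[16]) {
  for (int i = 0; i < 16; ++i) {
    uint8_t acc = 0;                  // additive identity
    for (int s = 0; s < num_segments; ++s)
      if (pg[s * 16 + i])             // inactive lanes contribute nothing
        acc += zn[s * 16 + i];
    result[i] = acc;
  }
}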

From 9578865054e6fe83de496df7842fa991ba9c2541 Mon Sep 17 00:00:00 2001
From: Caroline Concatto <caroline.concatto at arm.com>
Date: Mon, 23 Oct 2023 12:52:48 +0000
Subject: [PATCH] [SVE2.1][Clang][LLVM] Int/FP reduce builtins in Clang and
 LLVM intrinsics

This patch implements the Clang builtins
and the corresponding LLVM IR intrinsics for the following:

// Variants are also available for:
// _s8, _s16, _u16, _s32, _u32, _s64, _u64,
// _f16, _f32, _f64
uint8x16_t svaddqv[_u8](svbool_t pg, svuint8_t zn);

// Variants are also available for:
// _s8, _u16, _s16, _u32, _s32, _u64, _s64
uint8x16_t svandqv[_u8](svbool_t pg, svuint8_t zn);
uint8x16_t sveorqv[_u8](svbool_t pg, svuint8_t zn);
uint8x16_t svorqv[_u8](svbool_t pg, svuint8_t zn);

// Variants are also available for:
// _s8, _u16, _s16, _u32, _s32, _u64, _s64
uint8x16_t svmaxqv[_u8](svbool_t pg, svuint8_t zn);
uint8x16_t svminqv[_u8](svbool_t pg, svuint8_t zn);

// Variants are also available for _f32, _f64
float16x8_t svmaxnmqv[_f16](svbool_t pg, svfloat16_t zn);
float16x8_t svminnmqv[_f16](svbool_t pg, svfloat16_t zn);

As specified in ACLE PR #257 [1], the reduction instructions take scalable
vectors as input and produce fixed-length vectors as output; we therefore
change SVEEmitter to emit the fixed-length vector types when the NEON header
(arm_neon.h) is not present.

[1] https://github.com/ARM-software/acle/pull/257

Co-authored-by: Dinar Temirbulatov <dinar.temirbulatov at arm.com>
---
 clang/include/clang/Basic/TargetBuiltins.h    |   2 +-
 clang/include/clang/Basic/arm_sve.td          |  17 +
 clang/include/clang/Basic/arm_sve_sme_incl.td |   2 +
 clang/lib/CodeGen/CGBuiltin.cpp               |   4 +
 .../acle_sve2p1_fp_reduce.c                   | 285 +++++++
 .../acle_sve2p1_int_reduce.c                  | 784 ++++++++++++++++++
 clang/utils/TableGen/SveEmitter.cpp           |  35 +-
 llvm/include/llvm/IR/IntrinsicsAArch64.td     |  21 +
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |  26 +-
 llvm/lib/Target/AArch64/SVEInstrFormats.td    |  13 +-
 .../AArch64/sve2p1-intrinsics-fp-reduce.ll    | 189 +++++
 .../AArch64/sve2p1-intrinsics-int-reduce.ll   | 356 ++++++++
 12 files changed, 1715 insertions(+), 19 deletions(-)
 create mode 100644 clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_fp_reduce.c
 create mode 100644 clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_int_reduce.c
 create mode 100644 llvm/test/CodeGen/AArch64/sve2p1-intrinsics-fp-reduce.ll
 create mode 100644 llvm/test/CodeGen/AArch64/sve2p1-intrinsics-int-reduce.ll

diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h
index 8f7881abf26f7f4..c9f9cbec7493bfc 100644
--- a/clang/include/clang/Basic/TargetBuiltins.h
+++ b/clang/include/clang/Basic/TargetBuiltins.h
@@ -309,7 +309,7 @@ namespace clang {
     bool isTupleSet() const { return Flags & IsTupleSet; }
     bool isReadZA() const { return Flags & IsReadZA; }
     bool isWriteZA() const { return Flags & IsWriteZA; }
-
+    bool isReductionQV() const { return Flags & IsReductionQV; }
     uint64_t getBits() const { return Flags; }
     bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
   };
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index b5baafedd139602..e8fef1e7a8dfb0d 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -1859,6 +1859,23 @@ def SVBGRP   : SInst<"svbgrp[_{d}]",   "ddd", "UcUsUiUl", MergeNone, "aarch64_sv
 def SVBGRP_N : SInst<"svbgrp[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bgrp_x">;
 }
 
+// Standalone sve2.1 builtins
+let TargetGuard = "sve2p1" in {
+def SVORQV   : SInst<"svorqv[_{d}]", "{Pd", "csilUcUsUiUl", MergeNone, "aarch64_sve_orqv", [IsReductionQV]>;
+def SVEORQV  : SInst<"sveorqv[_{d}]", "{Pd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorqv", [IsReductionQV]>;
+def SVADDQV  : SInst<"svaddqv[_{d}]", "{Pd", "hfdcsilUcUsUiUl", MergeNone, "aarch64_sve_addqv", [IsReductionQV]>;
+def SVANDQV  : SInst<"svandqv[_{d}]", "{Pd", "csilUcUsUiUl", MergeNone, "aarch64_sve_andqv", [IsReductionQV]>;
+def SVSMAXQV : SInst<"svmaxqv[_{d}]", "{Pd", "csil", MergeNone, "aarch64_sve_smaxqv", [IsReductionQV]>;
+def SVUMAXQV : SInst<"svmaxqv[_{d}]", "{Pd", "UcUsUiUl", MergeNone, "aarch64_sve_umaxqv", [IsReductionQV]>;
+def SVSMINQV : SInst<"svminqv[_{d}]", "{Pd", "csil", MergeNone, "aarch64_sve_sminqv", [IsReductionQV]>;
+def SVUMINQV : SInst<"svminqv[_{d}]", "{Pd", "UcUsUiUl", MergeNone, "aarch64_sve_uminqv", [IsReductionQV]>;
+
+def SVFMAXNMQV: SInst<"svmaxnmqv[_{d}]", "{Pd", "hfd", MergeNone, "aarch64_sve_fmaxnmqv", [IsReductionQV]>;
+def SVFMINNMQV: SInst<"svminnmqv[_{d}]", "{Pd", "hfd", MergeNone, "aarch64_sve_fminnmqv", [IsReductionQV]>;
+def SVFMAXQV: SInst<"svmaxqv[_{d}]", "{Pd", "hfd", MergeNone, "aarch64_sve_fmaxqv", [IsReductionQV]>;
+def SVFMINQV: SInst<"svminqv[_{d}]", "{Pd", "hfd", MergeNone, "aarch64_sve_fminqv", [IsReductionQV]>;
+}
+
 let TargetGuard = "sve2p1" in {
 def SVFCLAMP   : SInst<"svclamp[_{d}]", "dddd", "hfd", MergeNone, "aarch64_sve_fclamp", [], []>;
 def SVPTRUE_COUNT  : SInst<"svptrue_{d}", "}v", "QcQsQiQl", MergeNone, "aarch64_sve_ptrue_{d}", [IsOverloadNone], []>;
diff --git a/clang/include/clang/Basic/arm_sve_sme_incl.td b/clang/include/clang/Basic/arm_sve_sme_incl.td
index 3a7a5b51b25801e..9fe497173b56ac6 100644
--- a/clang/include/clang/Basic/arm_sve_sme_incl.td
+++ b/clang/include/clang/Basic/arm_sve_sme_incl.td
@@ -128,6 +128,7 @@
 // Z: const pointer to uint64_t
 
 // Prototype modifiers added for SVE2p1
+// {: 128b vector
 // }: svcount_t
 
 class MergeType<int val, string suffix=""> {
@@ -224,6 +225,7 @@ def IsSharedZA                : FlagType<0x8000000000>;
 def IsPreservesZA             : FlagType<0x10000000000>;
 def IsReadZA                  : FlagType<0x20000000000>;
 def IsWriteZA                 : FlagType<0x40000000000>;
+def IsReductionQV             : FlagType<0x80000000000>;
 
 // These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
 class ImmCheckType<int val> {
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index e1211bb8949b665..86e77db4b914571 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -9834,6 +9834,10 @@ CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
   if (TypeFlags.isOverloadCvt())
     return {Ops[0]->getType(), Ops.back()->getType()};
 
+  if (TypeFlags.isReductionQV() && !ResultType->isScalableTy() &&
+      ResultType->isVectorTy())
+    return {ResultType, Ops[1]->getType()};
+
   assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
   return {DefaultType};
 }
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_fp_reduce.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_fp_reduce.c
new file mode 100644
index 000000000000000..e58cf4e49a37f92
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_fp_reduce.c
@@ -0,0 +1,285 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+// FADDQV
+
+// CHECK-LABEL: @test_svaddqv_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.addqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svaddqv_f16u10__SVBool_tu13__SVFloat16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.addqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+float16x8_t test_svaddqv_f16(svbool_t pg, svfloat16_t op)
+{
+  return SVE_ACLE_FUNC(svaddqv,,_f16,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svaddqv_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.addqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svaddqv_f32u10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.addqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+float32x4_t test_svaddqv_f32(svbool_t pg, svfloat32_t op)
+{
+  return SVE_ACLE_FUNC(svaddqv,,_f32,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svaddqv_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.addqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svaddqv_f64u10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.addqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+float64x2_t test_svaddqv_f64(svbool_t pg, svfloat64_t op)
+{
+  return SVE_ACLE_FUNC(svaddqv,,_f64,)(pg, op);
+}
+
+
+// FMAXQV
+
+// CHECK-LABEL: @test_svmaxqv_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.fmaxqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svmaxqv_f16u10__SVBool_tu13__SVFloat16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.fmaxqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+float16x8_t test_svmaxqv_f16(svbool_t pg, svfloat16_t op)
+{
+  return SVE_ACLE_FUNC(svmaxqv,,_f16,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svmaxqv_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.fmaxqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svmaxqv_f32u10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.fmaxqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+float32x4_t test_svmaxqv_f32(svbool_t pg, svfloat32_t op)
+{
+  return SVE_ACLE_FUNC(svmaxqv,,_f32,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svmaxqv_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.fmaxqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svmaxqv_f64u10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.fmaxqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+float64x2_t test_svmaxqv_f64(svbool_t pg, svfloat64_t op)
+{
+  return SVE_ACLE_FUNC(svmaxqv,,_f64,)(pg, op);
+}
+
+
+// FMINQV
+
+// CHECK-LABEL: @test_svminqv_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.fminqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svminqv_f16u10__SVBool_tu13__SVFloat16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.fminqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+float16x8_t test_svminqv_f16(svbool_t pg, svfloat16_t op)
+{
+  return SVE_ACLE_FUNC(svminqv,,_f16,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svminqv_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.fminqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svminqv_f32u10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.fminqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+float32x4_t test_svminqv_f32(svbool_t pg, svfloat32_t op)
+{
+  return SVE_ACLE_FUNC(svminqv,,_f32,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svminqv_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.fminqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svminqv_f64u10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.fminqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+float64x2_t test_svminqv_f64(svbool_t pg, svfloat64_t op)
+{
+  return SVE_ACLE_FUNC(svminqv,,_f64,)(pg, op);
+}
+
+
+// FMAXNMQV
+
+// CHECK-LABEL: @test_svmaxnmqv_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.fmaxnmqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z18test_svmaxnmqv_f16u10__SVBool_tu13__SVFloat16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.fmaxnmqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+float16x8_t test_svmaxnmqv_f16(svbool_t pg, svfloat16_t op)
+{
+  return SVE_ACLE_FUNC(svmaxnmqv,,_f16,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svmaxnmqv_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.fmaxnmqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z18test_svmaxnmqv_f32u10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.fmaxnmqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+float32x4_t test_svmaxnmqv_f32(svbool_t pg, svfloat32_t op)
+{
+  return SVE_ACLE_FUNC(svmaxnmqv,,_f32,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svmaxnmqv_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.fmaxnmqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z18test_svmaxnmqv_f64u10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.fmaxnmqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+float64x2_t test_svmaxnmqv_f64(svbool_t pg, svfloat64_t op)
+{
+  return SVE_ACLE_FUNC(svmaxnmqv,,_f64,)(pg, op);
+}
+
+
+// FMINNMQV
+
+// CHECK-LABEL: @test_svminnmqv_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.fminnmqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z18test_svminnmqv_f16u10__SVBool_tu13__SVFloat16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x half> @llvm.aarch64.sve.fminnmqv.v8f16.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x half> [[TMP1]]
+//
+float16x8_t test_svminnmqv_f16(svbool_t pg, svfloat16_t op)
+{
+  return SVE_ACLE_FUNC(svminnmqv,,_f16,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svminnmqv_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.fminnmqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z18test_svminnmqv_f32u10__SVBool_tu13__SVFloat32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x float> @llvm.aarch64.sve.fminnmqv.v4f32.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x float> [[TMP1]]
+//
+float32x4_t test_svminnmqv_f32(svbool_t pg, svfloat32_t op)
+{
+  return SVE_ACLE_FUNC(svminnmqv,,_f32,)(pg, op);
+}
+
+// CHECK-LABEL: @test_svminnmqv_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.fminnmqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z18test_svminnmqv_f64u10__SVBool_tu13__SVFloat64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.aarch64.sve.fminnmqv.v2f64.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x double> [[TMP1]]
+//
+float64x2_t test_svminnmqv_f64(svbool_t pg, svfloat64_t op)
+{
+  return SVE_ACLE_FUNC(svminnmqv,,_f64,)(pg, op);
+}
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_int_reduce.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_int_reduce.c
new file mode 100644
index 000000000000000..d060339fe9a7fac
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_int_reduce.c
@@ -0,0 +1,784 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+
+// ADDQV
+
+// CHECK-LABEL: @test_svaddqv_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.addqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svaddqv_s8u10__SVBool_tu10__SVInt8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.addqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_svaddqv_s8(svbool_t pg, svint8_t op1) {
+  return SVE_ACLE_FUNC(svaddqv,_s8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svaddqv_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.addqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svaddqv_s16u10__SVBool_tu11__SVInt16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.addqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_svaddqv_s16(svbool_t pg, svint16_t op1) {
+  return SVE_ACLE_FUNC(svaddqv,_s16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svaddqv_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.addqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svaddqv_s32u10__SVBool_tu11__SVInt32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.addqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_svaddqv_s32(svbool_t pg, svint32_t op1) {
+  return SVE_ACLE_FUNC(svaddqv,_s32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svaddqv_s64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.addqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svaddqv_s64u10__SVBool_tu11__SVInt64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.addqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+int64x2_t test_svaddqv_s64(svbool_t pg, svint64_t op1) {
+  return SVE_ACLE_FUNC(svaddqv,_s64,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svaddqv_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.addqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svaddqv_u8u10__SVBool_tu11__SVUint8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.addqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_svaddqv_u8(svbool_t pg, svuint8_t op1) {
+  return SVE_ACLE_FUNC(svaddqv,_u8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svaddqv_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.addqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svaddqv_u16u10__SVBool_tu12__SVUint16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.addqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_svaddqv_u16(svbool_t pg, svuint16_t op1) {
+  return SVE_ACLE_FUNC(svaddqv,_u16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svaddqv_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.addqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svaddqv_u32u10__SVBool_tu12__SVUint32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.addqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_svaddqv_u32(svbool_t pg, svuint32_t op1) {
+  return SVE_ACLE_FUNC(svaddqv,_u32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svaddqv_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.addqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svaddqv_u64u10__SVBool_tu12__SVUint64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.addqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+uint64x2_t test_svaddqv_u64(svbool_t pg, svuint64_t op1) {
+  return SVE_ACLE_FUNC(svaddqv,_u64,,)(pg, op1);
+}
+
+
+// ANDQV
+
+// CHECK-LABEL: @test_svandqv_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.andqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svandqv_s8u10__SVBool_tu10__SVInt8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.andqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_svandqv_s8(svbool_t pg, svint8_t op1) {
+  return SVE_ACLE_FUNC(svandqv,_s8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svandqv_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.andqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svandqv_s16u10__SVBool_tu11__SVInt16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.andqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_svandqv_s16(svbool_t pg, svint16_t op1) {
+  return SVE_ACLE_FUNC(svandqv,_s16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svandqv_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.andqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svandqv_s32u10__SVBool_tu11__SVInt32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.andqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_svandqv_s32(svbool_t pg, svint32_t op1) {
+  return SVE_ACLE_FUNC(svandqv,_s32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svandqv_s64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.andqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svandqv_s64u10__SVBool_tu11__SVInt64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.andqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+int64x2_t test_svandqv_s64(svbool_t pg, svint64_t op1) {
+  return SVE_ACLE_FUNC(svandqv,_s64,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svandqv_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.andqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svandqv_u8u10__SVBool_tu11__SVUint8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.andqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_svandqv_u8(svbool_t pg, svuint8_t op1) {
+  return SVE_ACLE_FUNC(svandqv,_u8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svandqv_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.andqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svandqv_u16u10__SVBool_tu12__SVUint16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.andqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_svandqv_u16(svbool_t pg, svuint16_t op1) {
+  return SVE_ACLE_FUNC(svandqv,_u16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svandqv_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.andqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svandqv_u32u10__SVBool_tu12__SVUint32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.andqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_svandqv_u32(svbool_t pg, svuint32_t op1) {
+  return SVE_ACLE_FUNC(svandqv,_u32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svandqv_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.andqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svandqv_u64u10__SVBool_tu12__SVUint64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.andqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+uint64x2_t test_svandqv_u64(svbool_t pg, svuint64_t op1) {
+  return SVE_ACLE_FUNC(svandqv,_u64,,)(pg, op1);
+}
+
+
+// EORQV
+
+// CHECK-LABEL: @test_sveorqv_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.eorqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_sveorqv_s8u10__SVBool_tu10__SVInt8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.eorqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_sveorqv_s8(svbool_t pg, svint8_t op1) {
+  return SVE_ACLE_FUNC(sveorqv,_s8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_sveorqv_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.eorqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_sveorqv_s16u10__SVBool_tu11__SVInt16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.eorqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_sveorqv_s16(svbool_t pg, svint16_t op1) {
+  return SVE_ACLE_FUNC(sveorqv,_s16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_sveorqv_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.eorqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_sveorqv_s32u10__SVBool_tu11__SVInt32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.eorqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_sveorqv_s32(svbool_t pg, svint32_t op1) {
+  return SVE_ACLE_FUNC(sveorqv,_s32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_sveorqv_s64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.eorqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_sveorqv_s64u10__SVBool_tu11__SVInt64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.eorqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+int64x2_t test_sveorqv_s64(svbool_t pg, svint64_t op1) {
+  return SVE_ACLE_FUNC(sveorqv,_s64,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_sveorqv_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.eorqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_sveorqv_u8u10__SVBool_tu11__SVUint8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.eorqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_sveorqv_u8(svbool_t pg, svuint8_t op1) {
+  return SVE_ACLE_FUNC(sveorqv,_u8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_sveorqv_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.eorqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_sveorqv_u16u10__SVBool_tu12__SVUint16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.eorqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_sveorqv_u16(svbool_t pg, svuint16_t op1) {
+  return SVE_ACLE_FUNC(sveorqv,_u16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_sveorqv_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.eorqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_sveorqv_u32u10__SVBool_tu12__SVUint32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.eorqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_sveorqv_u32(svbool_t pg, svuint32_t op1) {
+  return SVE_ACLE_FUNC(sveorqv,_u32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_sveorqv_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.eorqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_sveorqv_u64u10__SVBool_tu12__SVUint64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.eorqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+uint64x2_t test_sveorqv_u64(svbool_t pg, svuint64_t op1) {
+  return SVE_ACLE_FUNC(sveorqv,_u64,,)(pg, op1);
+}
+
+
+// ORQV
+
+// CHECK-LABEL: @test_svorqv_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.orqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z14test_svorqv_s8u10__SVBool_tu10__SVInt8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.orqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_svorqv_s8(svbool_t pg, svint8_t op1) {
+  return SVE_ACLE_FUNC(svorqv,_s8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svorqv_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.orqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svorqv_s16u10__SVBool_tu11__SVInt16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.orqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_svorqv_s16(svbool_t pg, svint16_t op1) {
+  return SVE_ACLE_FUNC(svorqv,_s16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svorqv_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.orqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svorqv_s32u10__SVBool_tu11__SVInt32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.orqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_svorqv_s32(svbool_t pg, svint32_t op1) {
+  return SVE_ACLE_FUNC(svorqv,_s32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svorqv_s64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.orqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svorqv_s64u10__SVBool_tu11__SVInt64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.orqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+int64x2_t test_svorqv_s64(svbool_t pg, svint64_t op1) {
+  return SVE_ACLE_FUNC(svorqv,_s64,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svorqv_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.orqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z14test_svorqv_u8u10__SVBool_tu11__SVUint8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.orqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_svorqv_u8(svbool_t pg, svuint8_t op1) {
+  return SVE_ACLE_FUNC(svorqv,_u8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svorqv_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.orqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svorqv_u16u10__SVBool_tu12__SVUint16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.orqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_svorqv_u16(svbool_t pg, svuint16_t op1) {
+  return SVE_ACLE_FUNC(svorqv,_u16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svorqv_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.orqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svorqv_u32u10__SVBool_tu12__SVUint32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.orqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_svorqv_u32(svbool_t pg, svuint32_t op1) {
+  return SVE_ACLE_FUNC(svorqv,_u32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svorqv_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.orqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svorqv_u64u10__SVBool_tu12__SVUint64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.orqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+uint64x2_t test_svorqv_u64(svbool_t pg, svuint64_t op1) {
+  return SVE_ACLE_FUNC(svorqv,_u64,,)(pg, op1);
+}
+
+
+// SMAXQV
+
+// CHECK-LABEL: @test_svmaxqv_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.smaxqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svmaxqv_s8u10__SVBool_tu10__SVInt8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.smaxqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_svmaxqv_s8(svbool_t pg, svint8_t op1) {
+  return SVE_ACLE_FUNC(svmaxqv,_s8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svmaxqv_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.smaxqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svmaxqv_s16u10__SVBool_tu11__SVInt16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.smaxqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_svmaxqv_s16(svbool_t pg, svint16_t op1) {
+  return SVE_ACLE_FUNC(svmaxqv,_s16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svmaxqv_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.smaxqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svmaxqv_s32u10__SVBool_tu11__SVInt32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.smaxqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_svmaxqv_s32(svbool_t pg, svint32_t op1) {
+  return SVE_ACLE_FUNC(svmaxqv,_s32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svmaxqv_s64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.smaxqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svmaxqv_s64u10__SVBool_tu11__SVInt64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.smaxqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+int64x2_t test_svmaxqv_s64(svbool_t pg, svint64_t op1) {
+  return SVE_ACLE_FUNC(svmaxqv,_s64,,)(pg, op1);
+}
+
+
+// UMAXQV
+
+// CHECK-LABEL: @test_svmaxqv_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.umaxqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svmaxqv_u8u10__SVBool_tu11__SVUint8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.umaxqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_svmaxqv_u8(svbool_t pg, svuint8_t op1) {
+  return SVE_ACLE_FUNC(svmaxqv,_u8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svmaxqv_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.umaxqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svmaxqv_u16u10__SVBool_tu12__SVUint16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.umaxqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_svmaxqv_u16(svbool_t pg, svuint16_t op1) {
+  return SVE_ACLE_FUNC(svmaxqv,_u16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svmaxqv_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.umaxqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svmaxqv_u32u10__SVBool_tu12__SVUint32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.umaxqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_svmaxqv_u32(svbool_t pg, svuint32_t op1) {
+  return SVE_ACLE_FUNC(svmaxqv,_u32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svmaxqv_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.umaxqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svmaxqv_u64u10__SVBool_tu12__SVUint64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.umaxqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+uint64x2_t test_svmaxqv_u64(svbool_t pg, svuint64_t op1) {
+  return SVE_ACLE_FUNC(svmaxqv,_u64,,)(pg, op1);
+}
+
+
+// SMINQV
+
+// CHECK-LABEL: @test_svminqv_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.sminqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svminqv_s8u10__SVBool_tu10__SVInt8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.sminqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_svminqv_s8(svbool_t pg, svint8_t op1) {
+  return SVE_ACLE_FUNC(svminqv,_s8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svminqv_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.sminqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svminqv_s16u10__SVBool_tu11__SVInt16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.sminqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_svminqv_s16(svbool_t pg, svint16_t op1) {
+  return SVE_ACLE_FUNC(svminqv,_s16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svminqv_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.sminqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svminqv_s32u10__SVBool_tu11__SVInt32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.sminqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_svminqv_s32(svbool_t pg, svint32_t op1) {
+  return SVE_ACLE_FUNC(svminqv,_s32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svminqv_s64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.sminqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svminqv_s64u10__SVBool_tu11__SVInt64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.sminqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+int64x2_t test_svminqv_s64(svbool_t pg, svint64_t op1) {
+  return SVE_ACLE_FUNC(svminqv,_s64,,)(pg, op1);
+}
+
+
+// UMINQV
+
+// CHECK-LABEL: @test_svminqv_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.uminqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+// CPP-CHECK-LABEL: @_Z15test_svminqv_u8u10__SVBool_tu11__SVUint8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <16 x i8> @llvm.aarch64.sve.uminqv.v16i8.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_svminqv_u8(svbool_t pg, svuint8_t op1) {
+  return SVE_ACLE_FUNC(svminqv,_u8,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svminqv_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.uminqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svminqv_u16u10__SVBool_tu12__SVUint16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i16> @llvm.aarch64.sve.uminqv.v8i16.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_svminqv_u16(svbool_t pg, svuint16_t op1) {
+  return SVE_ACLE_FUNC(svminqv,_u16,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svminqv_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.uminqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svminqv_u32u10__SVBool_tu12__SVUint32_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.aarch64.sve.uminqv.v4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_svminqv_u32(svbool_t pg, svuint32_t op1) {
+  return SVE_ACLE_FUNC(svminqv,_u32,,)(pg, op1);
+}
+
+// CHECK-LABEL: @test_svminqv_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.uminqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+// CPP-CHECK-LABEL: @_Z16test_svminqv_u64u10__SVBool_tu12__SVUint64_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i64> @llvm.aarch64.sve.uminqv.v2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]])
+// CPP-CHECK-NEXT:    ret <2 x i64> [[TMP1]]
+//
+uint64x2_t test_svminqv_u64(svbool_t pg, svuint64_t op1) {
+  return SVE_ACLE_FUNC(svminqv,_u64,,)(pg, op1);
+}
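
(Not part of the patch -- a minimal C usage sketch of the new integer
reduction builtins, assuming the arm_sve.h generated by this patch, a
target built with -march=armv8-a+sve2p1, and the uint32x4_t spelling
used in the tests above:)

  #include <arm_sve.h>

  // Element-wise unsigned max across all 128-bit segments of zn,
  // returning a single fixed-length 128-bit vector.
  uint32x4_t max_per_lane(svuint32_t zn) {
    svbool_t pg = svptrue_b32();   // all lanes active
    return svmaxqv_u32(pg, zn);
  }

With an all-true predicate every segment participates; lanes that are
inactive under a partial predicate simply do not contribute to the
reduction.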
diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index ab2b22233987a3c..3bfb66091a76dfd 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -91,6 +91,7 @@ class SVEType {
   bool isScalar() const { return NumVectors == 0; }
   bool isVector() const { return NumVectors > 0; }
   bool isScalableVector() const { return isVector() && IsScalable; }
+  bool isFixedLengthVector() const { return isVector() && !IsScalable; }
   bool isChar() const { return ElementBitwidth == 8; }
   bool isVoid() const { return Void & !Pointer; }
   bool isDefault() const { return DefaultType; }
@@ -449,7 +450,8 @@ std::string SVEType::builtin_str() const {
     return S;
   }
 
-  assert(isScalableVector() && "Unsupported type");
+  if (isFixedLengthVector())
+    return "V" + utostr(getNumElements() * NumVectors) + S;
   return "q" + utostr(getNumElements() * NumVectors) + S;
 }
 
@@ -466,6 +468,8 @@ std::string SVEType::str() const {
   else {
     if (isScalableVector() || isSvcount())
       S += "sv";
+    if (isFixedLengthVector())
+      S += "__sve_";
     if (!Signed && !isFloatingPoint())
       S += "u";
 
@@ -482,7 +486,7 @@ std::string SVEType::str() const {
 
     if (!isScalarPredicate() && !isPredicateVector() && !isSvcount())
       S += utostr(ElementBitwidth);
-    if (!isScalableVector() && isVector())
+    if (isFixedLengthVector())
       S += "x" + utostr(getNumElements());
     if (NumVectors > 1)
       S += "x" + utostr(NumVectors);
@@ -592,6 +596,11 @@ void SVEType::applyModifier(char Mod) {
     Bitwidth = 16;
     ElementBitwidth = 1;
     break;
+  case '{':
+    IsScalable = false;
+    Bitwidth = 128;
+    NumVectors = 1;
+    break;
   case 's':
   case 'a':
     Bitwidth = ElementBitwidth;
@@ -1224,7 +1233,27 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
 
   OS << "typedef __SVBFloat16_t svbfloat16_t;\n";
 
-  OS << "#include <arm_bf16.h>\n";
+  OS << "#include <arm_bf16.h>\n\n";
+
+  OS << "typedef __attribute__((vector_size (16))) int8_t __sve_int8x16_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) int16_t __sve_int16x8_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) int32_t __sve_int32x4_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) int64_t __sve_int64x2_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) uint8_t __sve_uint8x16_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) uint16_t "
+        "__sve_uint16x8_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) uint32_t "
+        "__sve_uint32x4_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) uint64_t "
+        "__sve_uint64x2_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) float16_t "
+        "__sve_float16x8_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) float32_t "
+        "__sve_float32x4_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) float64_t "
+        "__sve_float64x2_t;\n";
+  OS << "typedef __attribute__((vector_size (16))) bfloat16_t "
+        "__sve_bfloat16x8;\n";
 
   OS << "typedef __SVFloat32_t svfloat32_t;\n";
   OS << "typedef __SVFloat64_t svfloat64_t;\n";
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index a42e2c49cb477ba..55015bd6fe9f600 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1401,6 +1401,13 @@ class AdvSIMD_SVE_Reduce_Intrinsic
                llvm_anyvector_ty],
               [IntrNoMem]>;
 
+class AdvSIMD_SVE_V128_Reduce_Intrinsic
+  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+              [LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
+               llvm_anyvector_ty],
+               [IntrNoMem]>;
+
+
 class AdvSIMD_SVE_SADDV_Reduce_Intrinsic
   : DefaultAttrsIntrinsic<[llvm_i64_ty],
               [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1686,6 +1693,15 @@ def int_aarch64_sve_sqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_sve_uqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_sve_uqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
 
+def int_aarch64_sve_orqv      : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_eorqv     : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_andqv     : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_smaxqv    : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_umaxqv    : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_sminqv    : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_uminqv    : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+
+
 // Shifts
 
 def int_aarch64_sve_asr      : AdvSIMD_Pred2VectorArg_Intrinsic;
@@ -1996,6 +2012,11 @@ def int_aarch64_sve_fmaxv   : AdvSIMD_SVE_Reduce_Intrinsic;
 def int_aarch64_sve_fmaxnmv : AdvSIMD_SVE_Reduce_Intrinsic;
 def int_aarch64_sve_fminv   : AdvSIMD_SVE_Reduce_Intrinsic;
 def int_aarch64_sve_fminnmv : AdvSIMD_SVE_Reduce_Intrinsic;
+def int_aarch64_sve_addqv : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_fmaxnmqv : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_fminnmqv : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_fmaxqv : AdvSIMD_SVE_V128_Reduce_Intrinsic;
+def int_aarch64_sve_fminqv : AdvSIMD_SVE_V128_Reduce_Intrinsic;
 
 //
 // Floating-point conversions
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index d599ac4689e5cb3..1cd84bc3d1c0edb 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3974,11 +3974,11 @@ def BFCLAMP_ZZZ : sve2p1_fclamp<"bfclamp", 0b00, ZPR16>;
 // SME2.1 or SVE2.1 instructions
 //===----------------------------------------------------------------------===//
 let Predicates = [HasSVE2p1_or_HasSME2p1] in {
-defm FADDQV   : sve2p1_fp_reduction_q<0b000, "faddqv">;
-defm FMAXNMQV : sve2p1_fp_reduction_q<0b100, "fmaxnmqv">;
-defm FMINNMQV : sve2p1_fp_reduction_q<0b101, "fminnmqv">;
-defm FMAXQV   : sve2p1_fp_reduction_q<0b110, "fmaxqv">;
-defm FMINQV   : sve2p1_fp_reduction_q<0b111, "fminqv">;
+defm FADDQV   : sve2p1_fp_reduction_q<0b000, "faddqv", int_aarch64_sve_addqv>;
+defm FMAXNMQV : sve2p1_fp_reduction_q<0b100, "fmaxnmqv", int_aarch64_sve_fmaxnmqv>;
+defm FMINNMQV : sve2p1_fp_reduction_q<0b101, "fminnmqv", int_aarch64_sve_fminnmqv>;
+defm FMAXQV   : sve2p1_fp_reduction_q<0b110, "fmaxqv", int_aarch64_sve_fmaxqv>;
+defm FMINQV   : sve2p1_fp_reduction_q<0b111, "fminqv", int_aarch64_sve_fminqv>;
 
 defm DUPQ_ZZI : sve2p1_dupq<"dupq">;
 def EXTQ_ZZI : sve2p1_extq<"extq">;
@@ -3986,14 +3986,14 @@ def EXTQ_ZZI : sve2p1_extq<"extq">;
 defm PMOV_PZI : sve2p1_vector_to_pred<"pmov">;
 defm PMOV_ZIP : sve2p1_pred_to_vector<"pmov">;
 
-defm ORQV_VPZ   : sve2p1_int_reduce_q<0b1100, "orqv">;
-defm EORQV_VPZ  : sve2p1_int_reduce_q<0b1101, "eorqv">;
-defm ANDQV_VPZ  : sve2p1_int_reduce_q<0b1110, "andqv">;
-defm ADDQV_VPZ  : sve2p1_int_reduce_q<0b0001, "addqv">;
-defm SMAXQV_VPZ : sve2p1_int_reduce_q<0b0100, "smaxqv">;
-defm UMAXQV_VPZ : sve2p1_int_reduce_q<0b0101, "umaxqv">;
-defm SMINQV_VPZ : sve2p1_int_reduce_q<0b0110, "sminqv">;
-defm UMINQV_VPZ : sve2p1_int_reduce_q<0b0111, "uminqv">;
+defm ORQV_VPZ   : sve2p1_int_reduce_q<0b1100, "orqv", int_aarch64_sve_orqv>;
+defm EORQV_VPZ  : sve2p1_int_reduce_q<0b1101, "eorqv", int_aarch64_sve_eorqv>;
+defm ANDQV_VPZ  : sve2p1_int_reduce_q<0b1110, "andqv", int_aarch64_sve_andqv>;
+defm ADDQV_VPZ  : sve2p1_int_reduce_q<0b0001, "addqv", int_aarch64_sve_addqv>;
+defm SMAXQV_VPZ : sve2p1_int_reduce_q<0b0100, "smaxqv", int_aarch64_sve_smaxqv>;
+defm UMAXQV_VPZ : sve2p1_int_reduce_q<0b0101, "umaxqv", int_aarch64_sve_umaxqv>;
+defm SMINQV_VPZ : sve2p1_int_reduce_q<0b0110, "sminqv", int_aarch64_sve_sminqv>;
+defm UMINQV_VPZ : sve2p1_int_reduce_q<0b0111, "uminqv", int_aarch64_sve_uminqv>;
 
 defm TBXQ_ZZZ : sve2_int_perm_tbx<"tbxq", 0b10, null_frag>;
 defm ZIPQ1_ZZZ : sve2p1_permute_vec_elems_q<0b000, "zipq1">;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 7bb457d9188210c..f624fb2f78926d5 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -9867,10 +9867,14 @@ class sve2p1_fp_reduction_q<bits<2> sz, bits<3> opc, string mnemonic,
   let mayRaiseFPException = 1;
 }
 
-multiclass sve2p1_fp_reduction_q<bits<3> opc, string mnemonic> {
+multiclass sve2p1_fp_reduction_q<bits<3> opc, string mnemonic, SDPatternOperator op> {
   def _H : sve2p1_fp_reduction_q<0b01, opc, mnemonic, ZPR16, "8h">;
   def _S : sve2p1_fp_reduction_q<0b10, opc, mnemonic, ZPR32, "4s">;
   def _D : sve2p1_fp_reduction_q<0b11, opc, mnemonic, ZPR64, "2d">;
+
+  def : SVE_2_Op_Pat<v8f16, op, nxv8i1, nxv8f16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<v4f32, op, nxv4i1, nxv4f32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<v2f64, op, nxv2i1, nxv2f64, !cast<Instruction>(NAME # _D)>;
 }
 
 
@@ -10036,11 +10040,16 @@ class sve2p1_int_reduce_q<bits<2> sz, bits<4> opc, string mnemonic,
   let hasSideEffects = 0;
 }
 
-multiclass sve2p1_int_reduce_q<bits<4> opc, string mnemonic> {
+multiclass sve2p1_int_reduce_q<bits<4> opc, string mnemonic, SDPatternOperator op> {
   def _B : sve2p1_int_reduce_q<0b00, opc, mnemonic, ZPR8,  "16b">;
   def _H : sve2p1_int_reduce_q<0b01, opc, mnemonic, ZPR16, "8h">;
   def _S : sve2p1_int_reduce_q<0b10, opc, mnemonic, ZPR32, "4s">;
   def _D : sve2p1_int_reduce_q<0b11, opc, mnemonic, ZPR64, "2d">;
+
+  def : SVE_2_Op_Pat<v16i8, op, nxv16i1, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_2_Op_Pat<v8i16, op, nxv8i1, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<v4i32, op, nxv4i1, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<v2i64, op, nxv2i1, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-fp-reduce.ll
new file mode 100644
index 000000000000000..7957366fce3b05a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-fp-reduce.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve2p1 < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sme2p1 < %s | FileCheck %s
+
+;
+; FMAXNMQV
+;
+
+define <8 x half> @fmaxnmqv_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) {
+; CHECK-LABEL: fmaxnmqv_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnmqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.aarch64.sve.fmaxnmqv.v8f16.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                  <vscale x 8 x half> %a)
+  ret <8 x half> %res
+}
+
+define <4 x float> @fmaxnmqv_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) {
+; CHECK-LABEL: fmaxnmqv_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnmqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.aarch64.sve.fmaxnmqv.v4f32.nxv4f32(<vscale x 4 x i1> %pg,
+                                                                   <vscale x 4 x float> %a)
+  ret <4 x float> %res
+}
+
+define <2 x double> @fmaxnmqv_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) {
+; CHECK-LABEL: fmaxnmqv_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnmqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.aarch64.sve.fmaxnmqv.v2f64.nxv2f64(<vscale x 2 x i1> %pg,
+                                                                    <vscale x 2 x double> %a)
+  ret <2 x double> %res
+}
+
+;
+; FMINNMQV
+;
+
+define <8 x half> @fminnmqv_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) {
+; CHECK-LABEL: fminnmqv_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnmqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.aarch64.sve.fminnmqv.v8f16.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                  <vscale x 8 x half> %a)
+  ret <8 x half> %res
+}
+
+define <4 x float> @fminnmqv_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) {
+; CHECK-LABEL: fminnmqv_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnmqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.aarch64.sve.fminnmqv.v4f32.nxv4f32(<vscale x 4 x i1> %pg,
+                                                                   <vscale x 4 x float> %a)
+  ret <4 x float> %res
+}
+
+define <2 x double> @fminnmqv_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) {
+; CHECK-LABEL: fminnmqv_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnmqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.aarch64.sve.fminnmqv.v2f64.nxv2f64(<vscale x 2 x i1> %pg,
+                                                                    <vscale x 2 x double> %a)
+  ret <2 x double> %res
+}
+
+;
+; FADDQV
+;
+
+define <8 x half> @faddqv_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) {
+; CHECK-LABEL: faddqv_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    faddqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.aarch64.sve.addqv.v8f16.nxv8f16(<vscale x 8 x i1> %pg,
+                                                               <vscale x 8 x half> %a)
+  ret <8 x half> %res
+}
+
+define <4 x float> @faddqv_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) {
+; CHECK-LABEL: faddqv_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    faddqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.aarch64.sve.addqv.v4f32.nxv4f32(<vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x float> %a)
+  ret <4 x float> %res
+}
+
+define <2 x double> @faddqv_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) {
+; CHECK-LABEL: faddqv_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    faddqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.aarch64.sve.addqv.v2f64.nxv2f64(<vscale x 2 x i1> %pg,
+                                                                 <vscale x 2 x double> %a)
+  ret <2 x double> %res
+}
+
+;
+; FMINQV
+;
+
+define <8 x half> @fminqv_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) {
+; CHECK-LABEL: fminqv_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.aarch64.sve.fminqv.v8f16.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x half> %a)
+  ret <8 x half> %res
+}
+
+define <4 x float> @fminqv_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) {
+; CHECK-LABEL: fminqv_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.aarch64.sve.fminqv.v4f32.nxv4f32(<vscale x 4 x i1> %pg,
+                                                                 <vscale x 4 x float> %a)
+  ret <4 x float> %res
+}
+
+define <2 x double> @fminqv_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) {
+; CHECK-LABEL: fminqv_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.aarch64.sve.fminqv.v2f64.nxv2f64(<vscale x 2 x i1> %pg,
+                                                                  <vscale x 2 x double> %a)
+  ret <2 x double> %res
+}
+
+;
+; FMAXQV
+;
+
+define <8 x half> @fmaxqv_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) {
+; CHECK-LABEL: fmaxqv_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.aarch64.sve.fmaxqv.v8f16.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x half> %a)
+  ret <8 x half> %res
+}
+
+define <4 x float> @fmaxqv_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) {
+; CHECK-LABEL: fmaxqv_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.aarch64.sve.fmaxqv.v4f32.nxv4f32(<vscale x 4 x i1> %pg,
+                                                                 <vscale x 4 x float> %a)
+  ret <4 x float> %res
+}
+
+define <2 x double> @fmaxqv_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) {
+; CHECK-LABEL: fmaxqv_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.aarch64.sve.fmaxqv.v2f64.nxv2f64(<vscale x 2 x i1> %pg,
+                                                                  <vscale x 2 x double> %a)
+  ret <2 x double> %res
+}
+
+declare <8 x half> @llvm.aarch64.sve.fmaxnmqv.v8f16.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>)
+declare <4 x float> @llvm.aarch64.sve.fmaxnmqv.v4f32.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>)
+declare <2 x double> @llvm.aarch64.sve.fmaxnmqv.v2f64.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>)
+declare <8 x half> @llvm.aarch64.sve.fminnmqv.v8f16.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>)
+declare <4 x float> @llvm.aarch64.sve.fminnmqv.v4f32.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>)
+declare <2 x double> @llvm.aarch64.sve.fminnmqv.v2f64.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>)
+declare <8 x half> @llvm.aarch64.sve.addqv.v8f16.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>)
+declare <4 x float> @llvm.aarch64.sve.addqv.v4f32.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>)
+declare <2 x double> @llvm.aarch64.sve.addqv.v2f64.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>)
+declare <8 x half> @llvm.aarch64.sve.fminqv.v8f16.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>)
+declare <4 x float> @llvm.aarch64.sve.fminqv.v4f32.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>)
+declare <2 x double> @llvm.aarch64.sve.fminqv.v2f64.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>)
+declare <8 x half> @llvm.aarch64.sve.fmaxqv.v8f16.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>)
+declare <4 x float> @llvm.aarch64.sve.fmaxqv.v4f32.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>)
+declare <2 x double> @llvm.aarch64.sve.fmaxqv.v2f64.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>)
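
(Not part of the patch -- the same kind of hedged C sketch for the FP
reductions, under the assumptions stated earlier; svmaxnmqv follows the
FMAXNM rule that a number wins against a quiet NaN:)

  #include <arm_sve.h>

  // NaN-tolerant max per 128-bit lane position.
  float16x8_t maxnm_per_lane(svbool_t pg, svfloat16_t zn) {
    return svmaxnmqv_f16(pg, zn);
  }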
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-int-reduce.ll
new file mode 100644
index 000000000000000..a730ba9c9320930
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-int-reduce.ll
@@ -0,0 +1,356 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve2p1 < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sme2p1 < %s | FileCheck %s
+
+;
+; ORQV
+;
+
+define <16 x i8> @orqv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: orqv_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orqv v0.16b, p0, z0.b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.aarch64.sve.orqv.v16i8.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a);
+  ret <16 x i8> %res
+}
+
+define <8 x i16> @orqv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: orqv_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.aarch64.sve.orqv.v8i16.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
+
+define <4 x i32> @orqv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: orqv_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.aarch64.sve.orqv.v4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a);
+  ret <4 x i32> %res
+}
+
+define <2 x i64> @orqv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: orqv_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.aarch64.sve.orqv.v2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a);
+  ret <2 x i64> %res
+}
+
+;
+; EORQV
+;
+
+define <16 x i8> @eorqv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: eorqv_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eorqv v0.16b, p0, z0.b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.aarch64.sve.eorqv.v16i8.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a);
+  ret <16 x i8> %res
+}
+
+define <8 x i16> @eorqv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: eorqv_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eorqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.aarch64.sve.eorqv.v8i16.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
+
+define <4 x i32> @eorqv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: eorqv_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eorqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.aarch64.sve.eorqv.v4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a);
+  ret <4 x i32> %res
+}
+
+define <2 x i64> @eorqv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: eorqv_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eorqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.aarch64.sve.eorqv.v2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a);
+  ret <2 x i64> %res
+}
+
+;
+; ANDQV
+;
+
+define <16 x i8> @andqv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: andqv_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    andqv v0.16b, p0, z0.b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.aarch64.sve.andqv.v16i8.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a);
+  ret <16 x i8> %res
+}
+
+define <8 x i16> @andqv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: andqv_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    andqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.aarch64.sve.andqv.v8i16.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
+
+define <4 x i32> @andqv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: andqv_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    andqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.aarch64.sve.andqv.v4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a);
+  ret <4 x i32> %res
+}
+
+define <2 x i64> @andqv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: andqv_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    andqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.aarch64.sve.andqv.v2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a);
+  ret <2 x i64> %res
+}
+
+;
+; ADDQV
+;
+
+define <16 x i8> @addqv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: addqv_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addqv v0.16b, p0, z0.b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.aarch64.sve.addqv.v16i8.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a);
+  ret <16 x i8> %res
+}
+
+define <8 x i16> @addqv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: addqv_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.aarch64.sve.addqv.v8i16.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
+
+define <4 x i32> @addqv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: addqv_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.aarch64.sve.addqv.v4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a);
+  ret <4 x i32> %res
+}
+
+define <2 x i64> @addqv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: addqv_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.aarch64.sve.addqv.v2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a);
+  ret <2 x i64> %res
+}
+
+;
+; SMAXQV
+;
+
+define <16 x i8> @smaxqv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: smaxqv_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxqv v0.16b, p0, z0.b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.aarch64.sve.smaxqv.v16i8.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a);
+  ret <16 x i8> %res
+}
+
+define <8 x i16> @smaxqv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: smaxqv_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.aarch64.sve.smaxqv.v8i16.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
+
+define <4 x i32> @smaxqv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: smaxqv_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.aarch64.sve.smaxqv.v4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a);
+  ret <4 x i32> %res
+}
+
+define <2 x i64> @smaxqv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: smaxqv_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.aarch64.sve.smaxqv.v2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a);
+  ret <2 x i64> %res
+}
+
+;
+; UMAXQV
+;
+
+define <16 x i8> @umaxqv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: umaxqv_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxqv v0.16b, p0, z0.b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.aarch64.sve.umaxqv.v16i8.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a);
+  ret <16 x i8> %res
+}
+
+define <8 x i16> @umaxqv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: umaxqv_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.aarch64.sve.umaxqv.v8i16.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
+
+define <4 x i32> @umaxqv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: umaxqv_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.aarch64.sve.umaxqv.v4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a);
+  ret <4 x i32> %res
+}
+
+define <2 x i64> @umaxqv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: umaxqv_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.aarch64.sve.umaxqv.v2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a);
+  ret <2 x i64> %res
+}
+
+;
+; SMINQV
+;
+
+define <16 x i8> @sminqv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: sminqv_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminqv v0.16b, p0, z0.b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.aarch64.sve.sminqv.v16i8.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a);
+  ret <16 x i8> %res
+}
+
+define <8 x i16> @sminqv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: sminqv_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.aarch64.sve.sminqv.v8i16.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
+
+define <4 x i32> @sminqv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: sminqv_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.aarch64.sve.sminqv.v4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a);
+  ret <4 x i32> %res
+}
+
+define <2 x i64> @sminqv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: sminqv_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.aarch64.sve.sminqv.v2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a);
+  ret <2 x i64> %res
+}
+
+;
+; UMINQV
+;
+
+define <16 x i8> @uminqv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: uminqv_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminqv v0.16b, p0, z0.b
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.aarch64.sve.uminqv.v16i8.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a);
+  ret <16 x i8> %res
+}
+
+define <8 x i16> @uminqv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: uminqv_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminqv v0.8h, p0, z0.h
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.aarch64.sve.uminqv.v8i16.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
+
+define <4 x i32> @uminqv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: uminqv_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminqv v0.4s, p0, z0.s
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.aarch64.sve.uminqv.v4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a);
+  ret <4 x i32> %res
+}
+
+define <2 x i64> @uminqv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: uminqv_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminqv v0.2d, p0, z0.d
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.aarch64.sve.uminqv.v2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a);
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.aarch64.sve.orqv.v16i8.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <8 x i16> @llvm.aarch64.sve.orqv.v8i16.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <4 x i32> @llvm.aarch64.sve.orqv.v4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <2 x i64> @llvm.aarch64.sve.orqv.v2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <16 x i8> @llvm.aarch64.sve.eorqv.v16i8.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <8 x i16> @llvm.aarch64.sve.eorqv.v8i16.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <4 x i32> @llvm.aarch64.sve.eorqv.v4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <2 x i64> @llvm.aarch64.sve.eorqv.v2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <16 x i8> @llvm.aarch64.sve.andqv.v16i8.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <8 x i16> @llvm.aarch64.sve.andqv.v8i16.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <4 x i32> @llvm.aarch64.sve.andqv.v4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <2 x i64> @llvm.aarch64.sve.andqv.v2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <16 x i8> @llvm.aarch64.sve.addqv.v16i8.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <8 x i16> @llvm.aarch64.sve.addqv.v8i16.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <4 x i32> @llvm.aarch64.sve.addqv.v4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <2 x i64> @llvm.aarch64.sve.addqv.v2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <16 x i8> @llvm.aarch64.sve.smaxqv.v16i8.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <8 x i16> @llvm.aarch64.sve.smaxqv.v8i16.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <4 x i32> @llvm.aarch64.sve.smaxqv.v4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <2 x i64> @llvm.aarch64.sve.smaxqv.v2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <16 x i8> @llvm.aarch64.sve.umaxqv.v16i8.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <8 x i16> @llvm.aarch64.sve.umaxqv.v8i16.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <4 x i32> @llvm.aarch64.sve.umaxqv.v4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <2 x i64> @llvm.aarch64.sve.umaxqv.v2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <16 x i8> @llvm.aarch64.sve.sminqv.v16i8.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <8 x i16> @llvm.aarch64.sve.sminqv.v8i16.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <4 x i32> @llvm.aarch64.sve.sminqv.v4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <2 x i64> @llvm.aarch64.sve.sminqv.v2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <16 x i8> @llvm.aarch64.sve.uminqv.v16i8.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <8 x i16> @llvm.aarch64.sve.uminqv.v8i16.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <4 x i32> @llvm.aarch64.sve.uminqv.v4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <2 x i64> @llvm.aarch64.sve.uminqv.v2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
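
(Not part of the patch -- a scalar reference model of the ADDQV
semantics exercised above, assuming the architectural rule that
inactive lanes contribute the identity, i.e. zero for the add
reduction; handy when sanity-checking the CHECK lines:)

  #include <stdbool.h>
  #include <stdint.h>

  // out[j] accumulates lane j of every consecutive 4 x uint32_t
  // (128-bit) segment of the input; inactive lanes add nothing.
  void ref_addqv_u32(uint32_t out[4], const uint32_t *zn,
                     const bool *pg, int num_lanes) {
    for (int j = 0; j < 4; ++j)
      out[j] = 0;
    for (int i = 0; i < num_lanes; ++i)
      if (pg[i])
        out[i % 4] += zn[i];
  }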


