[llvm] [LLVM][InstSimplify] Add folds for SVE integer reduction intrinsics. (PR #167519)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 11 07:25:09 PST 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-transforms
Author: Paul Walker (paulwalker-arm)
<details>
<summary>Changes</summary>
[andv, eorv, orv, s/uaddv, s/umaxv, s/uminv]
sve_reduce_##(none, any) -> op's neutral value
sve_reduce_##(any, neutral) -> op's neutral value
[andv, orv, s/umaxv, s/uminv]
sve_reduce_##(all, splat(X)) -> X
[eorv]
sve_reduce_##(all, splat(X)) -> 0
NOTE: I did not spot any other simplifications for target-specific intrinsics, but they do exist in ConstantFold, which InstSimplify uses, so presumably this is allowed?
---
Patch is 43.35 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/167519.diff
5 Files Affected:
- (modified) llvm/include/llvm/IR/Constant.h (+3)
- (modified) llvm/lib/Analysis/InstructionSimplify.cpp (+68)
- (modified) llvm/lib/IR/Constants.cpp (+17)
- (added) llvm/test/Transforms/InstSimplify/AArch64/aarch64-sve-reductions.ll (+912)
- (added) llvm/test/Transforms/InstSimplify/AArch64/lit.local.cfg (+2)
``````````diff
diff --git a/llvm/include/llvm/IR/Constant.h b/llvm/include/llvm/IR/Constant.h
index 0be1fc172ebd4..e8ce453559ed7 100644
--- a/llvm/include/llvm/IR/Constant.h
+++ b/llvm/include/llvm/IR/Constant.h
@@ -79,6 +79,9 @@ class Constant : public User {
/// Return true if the value is the smallest signed value.
LLVM_ABI bool isMinSignedValue() const;
+ /// Return true if the value is the largest signed value.
+ LLVM_ABI bool isMaxSignedValue() const;
+
/// Return true if this is a finite and non-zero floating-point scalar
/// constant or a fixed width vector constant with all finite and non-zero
/// elements.
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 2a0a6a2d302b1..852259e6bdda0 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -41,6 +41,7 @@
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
@@ -6676,6 +6677,62 @@ static MinMaxOptResult OptimizeConstMinMax(const Constant *RHSConst,
return MinMaxOptResult::CannotOptimize;
}
+static Value *simplifySVEIntReduction(Intrinsic::ID IID, Type *ReturnType,
+ Value *Op0, Value *Op1) {
+ Constant *C0 = dyn_cast<Constant>(Op0);
+ Constant *C1 = dyn_cast<Constant>(Op1);
+ unsigned Width = ReturnType->getPrimitiveSizeInBits();
+
+ // All false predicate or reduction of neutral values ==> neutral result.
+ switch (IID) {
+ case Intrinsic::aarch64_sve_eorv:
+ case Intrinsic::aarch64_sve_orv:
+ case Intrinsic::aarch64_sve_saddv:
+ case Intrinsic::aarch64_sve_uaddv:
+ case Intrinsic::aarch64_sve_umaxv:
+ if ((C0 && C0->isNullValue()) || (C1 && C1->isNullValue()))
+ return ConstantInt::get(ReturnType, 0);
+ break;
+ case Intrinsic::aarch64_sve_andv:
+ case Intrinsic::aarch64_sve_uminv:
+ if ((C0 && C0->isNullValue()) || (C1 && C1->isAllOnesValue()))
+ return ConstantInt::get(ReturnType, APInt::getMaxValue(Width));
+ break;
+ case Intrinsic::aarch64_sve_smaxv:
+ if ((C0 && C0->isNullValue()) || (C1 && C1->isMinSignedValue()))
+ return ConstantInt::get(ReturnType, APInt::getSignedMinValue(Width));
+ break;
+ case Intrinsic::aarch64_sve_sminv:
+ if ((C0 && C0->isNullValue()) || (C1 && C1->isMaxSignedValue()))
+ return ConstantInt::get(ReturnType, APInt::getSignedMaxValue(Width));
+ break;
+ }
+
+ switch (IID) {
+ case Intrinsic::aarch64_sve_andv:
+ case Intrinsic::aarch64_sve_orv:
+ case Intrinsic::aarch64_sve_smaxv:
+ case Intrinsic::aarch64_sve_sminv:
+ case Intrinsic::aarch64_sve_umaxv:
+ case Intrinsic::aarch64_sve_uminv:
+ // sve_reduce_##(all, splat(X)) ==> X
+ if (C0 && C0->isAllOnesValue()) {
+ if (Value *SplatVal = getSplatValue(Op1)) {
+ assert(SplatVal->getType() == ReturnType && "Unexpected result type!");
+ return SplatVal;
+ }
+ }
+ break;
+ case Intrinsic::aarch64_sve_eorv:
+ // sve_reduce_xor(all, splat(X)) ==> 0
+ if (C0 && C0->isAllOnesValue())
+ return ConstantInt::get(ReturnType, 0);
+ break;
+ }
+
+ return nullptr;
+}
+
Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
Value *Op0, Value *Op1,
const SimplifyQuery &Q,
@@ -7033,6 +7090,17 @@ Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
break;
}
+
+ case Intrinsic::aarch64_sve_andv:
+ case Intrinsic::aarch64_sve_eorv:
+ case Intrinsic::aarch64_sve_orv:
+ case Intrinsic::aarch64_sve_saddv:
+ case Intrinsic::aarch64_sve_smaxv:
+ case Intrinsic::aarch64_sve_sminv:
+ case Intrinsic::aarch64_sve_uaddv:
+ case Intrinsic::aarch64_sve_umaxv:
+ case Intrinsic::aarch64_sve_uminv:
+ return simplifySVEIntReduction(IID, ReturnType, Op0, Op1);
default:
break;
}
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index cbce8bd736102..a3aa5e9571657 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -183,6 +183,23 @@ bool Constant::isMinSignedValue() const {
return false;
}
+bool Constant::isMaxSignedValue() const {
+ // Check for INT_MAX integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isMaxValue(/*isSigned=*/true);
+
+ // Check for FP which are bitcasted from INT_MAX integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().bitcastToAPInt().isMaxSignedValue();
+
+ // Check for splats of INT_MAX values.
+ if (getType()->isVectorTy())
+ if (const auto *SplatVal = getSplatValue())
+ return SplatVal->isMaxSignedValue();
+
+ return false;
+}
+
bool Constant::isNotMinSignedValue() const {
// Check for INT_MIN integers
if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
diff --git a/llvm/test/Transforms/InstSimplify/AArch64/aarch64-sve-reductions.ll b/llvm/test/Transforms/InstSimplify/AArch64/aarch64-sve-reductions.ll
new file mode 100644
index 0000000000000..a54d6044d04b1
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/AArch64/aarch64-sve-reductions.ll
@@ -0,0 +1,912 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -passes=instsimplify < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; ANDV
+;
+
+define i8 @andv_i8_no_active(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: define i8 @andv_i8_no_active(
+; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: ret i8 -1
+;
+ %out = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a)
+ ret i8 %out
+}
+
+define i8 @andv_i8_splat_neutral_val(<vscale x 16 x i1> %pg) #0 {
+; CHECK-LABEL: define i8 @andv_i8_splat_neutral_val(
+; CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i8 -1
+;
+ %out = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 -1))
+ ret i8 %out
+}
+
+define i8 @andv_i8_splat_non_neutral_val(<vscale x 16 x i1> %pg) #0 {
+; CHECK-LABEL: define i8 @andv_i8_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> zeroinitializer)
+; CHECK-NEXT: ret i8 [[OUT]]
+;
+ %out = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> zeroinitializer)
+ ret i8 %out
+}
+
+define i8 @andv_i8_all_active_splat(i8 %a) #0 {
+; CHECK-LABEL: define i8 @andv_i8_all_active_splat(
+; CHECK-SAME: i8 [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i8 [[A]]
+;
+ %a.insert = insertelement <vscale x 16 x i8> poison, i8 %a, i8 0
+ %a.splat = shufflevector <vscale x 16 x i8> %a.insert, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %out = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> %a.splat)
+ ret i8 %out
+}
+
+define i16 @andv_i16_splat_neutral_val(<vscale x 8 x i1> %pg) #0 {
+; CHECK-LABEL: define i16 @andv_i16_splat_neutral_val(
+; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i16 -1
+;
+ %out = call i16 @llvm.aarch64.sve.andv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 -1))
+ ret i16 %out
+}
+
+define i16 @andv_i16_splat_non_neutral_val(<vscale x 8 x i1> %pg) #0 {
+; CHECK-LABEL: define i16 @andv_i16_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i16 @llvm.aarch64.sve.andv.nxv8i16(<vscale x 8 x i1> [[PG]], <vscale x 8 x i16> zeroinitializer)
+; CHECK-NEXT: ret i16 [[OUT]]
+;
+ %out = call i16 @llvm.aarch64.sve.andv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> zeroinitializer)
+ ret i16 %out
+}
+
+define i32 @andv_i32_splat_neutral_val(<vscale x 4 x i1> %pg) #0 {
+; CHECK-LABEL: define i32 @andv_i32_splat_neutral_val(
+; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i32 -1
+;
+ %out = call i32 @llvm.aarch64.sve.andv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 -1))
+ ret i32 %out
+}
+
+define i32 @andv_i32_splat_non_neutral_val(<vscale x 4 x i1> %pg) #0 {
+; CHECK-LABEL: define i32 @andv_i32_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i32 @llvm.aarch64.sve.andv.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret i32 [[OUT]]
+;
+ %out = call i32 @llvm.aarch64.sve.andv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> zeroinitializer)
+ ret i32 %out
+}
+
+define i64 @andv_i64_splat_neutral_val(<vscale x 2 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @andv_i64_splat_neutral_val(
+; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i64 -1
+;
+ %out = call i64 @llvm.aarch64.sve.andv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 -1))
+ ret i64 %out
+}
+
+define i64 @andv_i64_splat_non_neutral_val(<vscale x 2 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @andv_i64_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i64 @llvm.aarch64.sve.andv.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> zeroinitializer)
+; CHECK-NEXT: ret i64 [[OUT]]
+;
+ %out = call i64 @llvm.aarch64.sve.andv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> zeroinitializer)
+ ret i64 %out
+}
+
+;
+; EORV
+;
+
+define i8 @eorv_i8_no_active(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: define i8 @eorv_i8_no_active(
+; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i8 0
+;
+ %out = call i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a)
+ ret i8 %out
+}
+
+define i8 @eorv_i8_splat_neutral_val(<vscale x 16 x i1> %pg) #0 {
+; CHECK-LABEL: define i8 @eorv_i8_splat_neutral_val(
+; CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i8 0
+;
+ %out = call i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> zeroinitializer)
+ ret i8 %out
+}
+
+define i8 @eorv_i8_splat_non_neutral_val(<vscale x 16 x i1> %pg) #0 {
+; CHECK-LABEL: define i8 @eorv_i8_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> splat (i8 1))
+; CHECK-NEXT: ret i8 [[OUT]]
+;
+ %out = call i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 1))
+ ret i8 %out
+}
+
+define i8 @eorv_i8_all_active_splat(i8 %a) #0 {
+; CHECK-LABEL: define i8 @eorv_i8_all_active_splat(
+; CHECK-SAME: i8 [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i8 0
+;
+ %a.insert = insertelement <vscale x 16 x i8> poison, i8 %a, i8 0
+ %a.splat = shufflevector <vscale x 16 x i8> %a.insert, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %out = call i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> %a.splat)
+ ret i8 %out
+}
+
+define i16 @eorv_i16_splat_neutral_val(<vscale x 8 x i1> %pg) #0 {
+; CHECK-LABEL: define i16 @eorv_i16_splat_neutral_val(
+; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i16 0
+;
+ %out = call i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> zeroinitializer)
+ ret i16 %out
+}
+
+define i16 @eorv_i16_splat_non_neutral_val(<vscale x 8 x i1> %pg) #0 {
+; CHECK-LABEL: define i16 @eorv_i16_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1> [[PG]], <vscale x 8 x i16> splat (i16 1))
+; CHECK-NEXT: ret i16 [[OUT]]
+;
+ %out = call i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 1))
+ ret i16 %out
+}
+
+define i32 @eorv_i32_splat_neutral_val(<vscale x 4 x i1> %pg) #0 {
+; CHECK-LABEL: define i32 @eorv_i32_splat_neutral_val(
+; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i32 0
+;
+ %out = call i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> zeroinitializer)
+ ret i32 %out
+}
+
+define i32 @eorv_i32_splat_non_neutral_val(<vscale x 4 x i1> %pg) #0 {
+; CHECK-LABEL: define i32 @eorv_i32_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[OUT]]
+;
+ %out = call i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 1))
+ ret i32 %out
+}
+
+define i64 @eorv_i64_splat_neutral_val(<vscale x 2 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @eorv_i64_splat_neutral_val(
+; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i64 0
+;
+ %out = call i64 @llvm.aarch64.sve.eorv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> zeroinitializer)
+ ret i64 %out
+}
+
+define i64 @eorv_i64_splat_non_neutral_val(<vscale x 2 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @eorv_i64_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i64 @llvm.aarch64.sve.eorv.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> splat (i64 1))
+; CHECK-NEXT: ret i64 [[OUT]]
+;
+ %out = call i64 @llvm.aarch64.sve.eorv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 1))
+ ret i64 %out
+}
+
+;
+; ORV
+;
+
+define i8 @orv_i8_no_active(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: define i8 @orv_i8_no_active(
+; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i8 0
+;
+ %out = call i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a)
+ ret i8 %out
+}
+
+define i8 @orv_i8_splat_neutral_val(<vscale x 16 x i1> %pg) #0 {
+; CHECK-LABEL: define i8 @orv_i8_splat_neutral_val(
+; CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i8 0
+;
+ %out = call i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> zeroinitializer)
+ ret i8 %out
+}
+
+define i8 @orv_i8_splat_non_neutral_val(<vscale x 16 x i1> %pg) #0 {
+; CHECK-LABEL: define i8 @orv_i8_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> splat (i8 1))
+; CHECK-NEXT: ret i8 [[OUT]]
+;
+ %out = call i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 1))
+ ret i8 %out
+}
+
+define i8 @orv_i8_all_active_splat(i8 %a) #0 {
+; CHECK-LABEL: define i8 @orv_i8_all_active_splat(
+; CHECK-SAME: i8 [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i8 [[A]]
+;
+ %a.insert = insertelement <vscale x 16 x i8> poison, i8 %a, i8 0
+ %a.splat = shufflevector <vscale x 16 x i8> %a.insert, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %out = call i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> %a.splat)
+ ret i8 %out
+}
+
+define i16 @orv_i16_splat_neutral_val(<vscale x 8 x i1> %pg) #0 {
+; CHECK-LABEL: define i16 @orv_i16_splat_neutral_val(
+; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i16 0
+;
+ %out = call i16 @llvm.aarch64.sve.orv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> zeroinitializer)
+ ret i16 %out
+}
+
+define i16 @orv_i16_splat_non_neutral_val(<vscale x 8 x i1> %pg) #0 {
+; CHECK-LABEL: define i16 @orv_i16_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i16 @llvm.aarch64.sve.orv.nxv8i16(<vscale x 8 x i1> [[PG]], <vscale x 8 x i16> splat (i16 1))
+; CHECK-NEXT: ret i16 [[OUT]]
+;
+ %out = call i16 @llvm.aarch64.sve.orv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 1))
+ ret i16 %out
+}
+
+define i32 @orv_i32_splat_neutral_val(<vscale x 4 x i1> %pg) #0 {
+; CHECK-LABEL: define i32 @orv_i32_splat_neutral_val(
+; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i32 0
+;
+ %out = call i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> zeroinitializer)
+ ret i32 %out
+}
+
+define i32 @orv_i32_splat_non_neutral_val(<vscale x 4 x i1> %pg) #0 {
+; CHECK-LABEL: define i32 @orv_i32_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret i32 [[OUT]]
+;
+ %out = call i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 1))
+ ret i32 %out
+}
+
+define i64 @orv_i64_splat_neutral_val(<vscale x 2 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @orv_i64_splat_neutral_val(
+; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i64 0
+;
+ %out = call i64 @llvm.aarch64.sve.orv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> zeroinitializer)
+ ret i64 %out
+}
+
+define i64 @orv_i64_splat_non_neutral_val(<vscale x 2 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @orv_i64_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i64 @llvm.aarch64.sve.orv.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> splat (i64 1))
+; CHECK-NEXT: ret i64 [[OUT]]
+;
+ %out = call i64 @llvm.aarch64.sve.orv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 1))
+ ret i64 %out
+}
+
+;
+; SADDV
+;
+
+define i64 @saddv_i8_no_active(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: define i64 @saddv_i8_no_active(
+; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i64 0
+;
+ %out = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a)
+ ret i64 %out
+}
+
+define i64 @saddv_i8_splat_neutral_val(<vscale x 16 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @saddv_i8_splat_neutral_val(
+; CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i64 0
+;
+ %out = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> zeroinitializer)
+ ret i64 %out
+}
+
+define i64 @saddv_i8_splat_non_neutral_val(<vscale x 16 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @saddv_i8_splat_non_neutral_val(
+; CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OUT:%.*]] = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> splat (i8 1))
+; CHECK-NEXT: ret i64 [[OUT]]
+;
+ %out = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 1))
+ ret i64 %out
+}
+
+define i64 @saddv_i8_all_active_splat(i8 %a) #0 {
+; CHECK-LABEL: define i64 @saddv_i8_all_active_splat(
+; CHECK-SAME: i8 [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[A_INSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[A]], i8 0
+; CHECK-NEXT: [[A_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[A_INSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: [[OUT:%.*]] = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> [[A_SPLAT]])
+; CHECK-NEXT: ret i64 [[OUT]]
+;
+ %a.insert = insertelement <vscale x 16 x i8> poison, i8 %a, i8 0
+ %a.splat = shufflevector <vscale x 16 x i8> %a.insert, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %out = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> %a.splat)
+ ret i64 %out
+}
+
+define i64 @saddv_i16_splat_neutral_val(<vscale x 8 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @saddv_i16_splat_neutral_val(
+; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret i64 0
+;
+ %out = call i64 @llvm.aarch64.sve.saddv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> zeroinitializer)
+ ret i64 %out
+}
+
+define i64 @saddv_i16_splat_non_neutral_val(<vscale x 8 x i1> %pg) #0 {
+; CHECK-LABEL: define i64 @saddv_i16_splat_n...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/167519
More information about the llvm-commits
mailing list