[llvm] 4c7cfe3 - [RISCV] Remove intrinsic declares from costmodel tests. NFC
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 8 03:31:34 PDT 2025
Author: Luke Lau
Date: 2025-07-08T18:31:24+08:00
New Revision: 4c7cfe3fdb57c3d65c2edd4f2a3fdc4b1db3a8e1
URL: https://github.com/llvm/llvm-project/commit/4c7cfe3fdb57c3d65c2edd4f2a3fdc4b1db3a8e1
DIFF: https://github.com/llvm/llvm-project/commit/4c7cfe3fdb57c3d65c2edd4f2a3fdc4b1db3a8e1.diff
LOG: [RISCV] Remove intrinsic declares from costmodel tests. NFC
Declaring an intrinsic is no longer needed these days, and in intrinsic
tests we end up with a lot of declarations due to the various type overloads.
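For context, the IR parser now materializes intrinsic declarations
automatically from the mangled call, so a test can invoke an overload
directly. A minimal sketch (hypothetical function, not part of this commit):

define <2 x i64> @abs_example(<2 x i64> %v) {
  ; No preceding 'declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)' is
  ; required; the parser infers the declaration from the call below.
  %r = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %v, i1 false)
  ret <2 x i64> %r
}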
Added:
Modified:
llvm/test/Analysis/CostModel/RISCV/abs.ll
llvm/test/Analysis/CostModel/RISCV/active_lane_mask.ll
llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
llvm/test/Analysis/CostModel/RISCV/cttz_elts.ll
llvm/test/Analysis/CostModel/RISCV/fixed-vector-gather.ll
llvm/test/Analysis/CostModel/RISCV/fixed-vector-scatter.ll
llvm/test/Analysis/CostModel/RISCV/gep.ll
llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll
llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll
llvm/test/Analysis/CostModel/RISCV/reduce-add.ll
llvm/test/Analysis/CostModel/RISCV/reduce-and.ll
llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
llvm/test/Analysis/CostModel/RISCV/reduce-max.ll
llvm/test/Analysis/CostModel/RISCV/reduce-min.ll
llvm/test/Analysis/CostModel/RISCV/reduce-or.ll
llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll
llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
llvm/test/Analysis/CostModel/RISCV/reduce-xor.ll
llvm/test/Analysis/CostModel/RISCV/rvv-expandload-compressstore.ll
llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
llvm/test/Analysis/CostModel/RISCV/vp-intrinsics.ll
Removed:
################################################################################
diff --git a/llvm/test/Analysis/CostModel/RISCV/abs.ll b/llvm/test/Analysis/CostModel/RISCV/abs.ll
index 7252716af8605..b1f93f3811580 100644
--- a/llvm/test/Analysis/CostModel/RISCV/abs.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/abs.ll
@@ -3,44 +3,6 @@
; Check that we don't crash querying costs when vectors are not enabled.
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64
-declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
-declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
-declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
-declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
-declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
-declare <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64>, i1)
-
-declare <2 x i32> @llvm.abs.v2i32(<2 x i32>, i1)
-declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
-declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
-declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
-declare <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32>, i1)
-declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
-declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)
-declare <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32>, i1)
-
-declare <2 x i16> @llvm.abs.v2i16(<2 x i16>, i1)
-declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1)
-declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
-declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
-declare <32 x i16> @llvm.abs.v32i16(<32 x i16>, i1)
-declare <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16>, i1)
-declare <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16>, i1)
-declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
-declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)
-declare <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16>, i1)
-
-declare <2 x i8> @llvm.abs.v2i8(<2 x i8>, i1)
-declare <4 x i8> @llvm.abs.v4i8(<4 x i8>, i1)
-declare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1)
-declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
-declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
-declare <64 x i8> @llvm.abs.v64i8(<64 x i8>, i1)
-declare <vscale x 8 x i8> @llvm.abs.nxv8i8(<vscale x 8 x i8>, i1)
-declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
-declare <vscale x 32 x i8> @llvm.abs.nxv32i8(<vscale x 32 x i8>, i1)
-declare <vscale x 64 x i8> @llvm.abs.nxv64i8(<vscale x 64 x i8>, i1)
-
define i32 @abs(i32 %arg) {
; CHECK-LABEL: 'abs'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call <2 x i64> @llvm.abs.v2i64(<2 x i64> undef, i1 false)
diff --git a/llvm/test/Analysis/CostModel/RISCV/active_lane_mask.ll b/llvm/test/Analysis/CostModel/RISCV/active_lane_mask.ll
index 7ebe14d98b21b..be2bb7374a318 100644
--- a/llvm/test/Analysis/CostModel/RISCV/active_lane_mask.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/active_lane_mask.ll
@@ -57,26 +57,3 @@ define void @get_lane_mask() {
ret void
}
-
-declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64, i64)
-declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64, i64)
-declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)
-declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64, i64)
-declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64, i64)
-declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32, i32)
-declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32, i32)
-declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32, i32)
-declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32, i32)
-declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64, i64)
-declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16, i16)
-declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64, i64)
-declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64, i64)
-declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64, i64)
-declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64, i64)
-declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
-declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
-declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
-declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32, i32)
-declare <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64, i64)
-declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16, i16)
diff --git a/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll b/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
index 852c237cf2501..6c974afb76031 100644
--- a/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
@@ -1577,32 +1577,3 @@ define void @fmuladd_f16() {
call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
ret void
}
-
-declare <1 x bfloat> @llvm.vp.frem.v1f16(<1 x bfloat>, <1 x bfloat>, <1 x i1>, i32)
-declare <2 x bfloat> @llvm.vp.frem.v2f16(<2 x bfloat>, <2 x bfloat>, <2 x i1>, i32)
-declare <4 x bfloat> @llvm.vp.frem.v4f16(<4 x bfloat>, <4 x bfloat>, <4 x i1>, i32)
-declare <8 x bfloat> @llvm.vp.frem.v8f16(<8 x bfloat>, <8 x bfloat>, <8 x i1>, i32)
-declare <16 x bfloat> @llvm.vp.frem.v16f16(<16 x bfloat>, <16 x bfloat>, <16 x i1>, i32)
-declare <1 x float> @llvm.vp.frem.v1f32(<1 x float>, <1 x float>, <1 x i1>, i32)
-declare <2 x float> @llvm.vp.frem.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32)
-declare <4 x float> @llvm.vp.frem.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
-declare <8 x float> @llvm.vp.frem.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
-declare <16 x float> @llvm.vp.frem.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32)
-declare <1 x double> @llvm.vp.frem.v1f64(<1 x double>, <1 x double>, <1 x i1>, i32)
-declare <2 x double> @llvm.vp.frem.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32)
-declare <4 x double> @llvm.vp.frem.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32)
-declare <8 x double> @llvm.vp.frem.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
-declare <vscale x 1 x bfloat> @llvm.vp.frem.nxv1f16(<vscale x 1 x bfloat>, <vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x bfloat> @llvm.vp.frem.nxv2f16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x bfloat> @llvm.vp.frem.nxv4f16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x bfloat> @llvm.vp.frem.nxv8f16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x bfloat> @llvm.vp.frem.nxv16f16(<vscale x 16 x bfloat>, <vscale x 16 x bfloat>, <vscale x 16 x i1>, i32)
-declare <vscale x 1 x float> @llvm.vp.frem.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x float> @llvm.vp.frem.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x float> @llvm.vp.frem.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x float> @llvm.vp.frem.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x float> @llvm.vp.frem.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare <vscale x 1 x double> @llvm.vp.frem.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x double> @llvm.vp.frem.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.frem.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x double> @llvm.vp.frem.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
diff --git a/llvm/test/Analysis/CostModel/RISCV/cttz_elts.ll b/llvm/test/Analysis/CostModel/RISCV/cttz_elts.ll
index 6d3dfc3061388..094d73ddd0581 100644
--- a/llvm/test/Analysis/CostModel/RISCV/cttz_elts.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/cttz_elts.ll
@@ -131,19 +131,3 @@ define void @foo_vscale_range_2_16() vscale_range(2,16) {
ret void
}
-
-declare i64 @llvm.experimental.cttz.elts.i64.nxv2i1(<vscale x 2 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv4i1(<vscale x 4 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv8i1(<vscale x 8 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv32i1(<vscale x 32 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv64i1(<vscale x 64 x i1>, i1)
-declare i64 @llvm.experimental.cttz.elts.i64.nxv128i1(<vscale x 128 x i1>, i1)
-
-declare i32 @llvm.experimental.cttz.elts.i32.nxv2i1(<vscale x 2 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv4i1(<vscale x 4 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv8i1(<vscale x 8 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv32i1(<vscale x 32 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv64i1(<vscale x 64 x i1>, i1)
-declare i32 @llvm.experimental.cttz.elts.i32.nxv128i1(<vscale x 128 x i1>, i1)
diff --git a/llvm/test/Analysis/CostModel/RISCV/fixed-vector-gather.ll b/llvm/test/Analysis/CostModel/RISCV/fixed-vector-gather.ll
index ae913456e344c..6eec7ed2f98ec 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fixed-vector-gather.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fixed-vector-gather.ll
@@ -185,46 +185,3 @@ define i32 @masked_gather() {
ret i32 0
}
-declare <8 x double> @llvm.masked.gather.v8f64.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x double>)
-declare <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x double>)
-declare <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x double>)
-declare <1 x double> @llvm.masked.gather.v1f64.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x double>)
-
-declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
-declare <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x float>)
-declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)
-declare <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x float>)
-declare <1 x float> @llvm.masked.gather.v1f32.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x float>)
-
-declare <32 x half> @llvm.masked.gather.v32f16.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x half>)
-declare <16 x half> @llvm.masked.gather.v16f16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x half>)
-declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>)
-declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x half>)
-declare <2 x half> @llvm.masked.gather.v2f16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x half>)
-declare <1 x half> @llvm.masked.gather.v1f16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x half>)
-
-declare <8 x i64> @llvm.masked.gather.v8i64.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i64>)
-declare <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i64>)
-declare <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i64>)
-declare <1 x i64> @llvm.masked.gather.v1i64.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i64>)
-
-declare <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i32>)
-declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i32>)
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
-declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>)
-declare <1 x i32> @llvm.masked.gather.v1i32.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i32>)
-
-declare <32 x i16> @llvm.masked.gather.v32i16.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x i16>)
-declare <16 x i16> @llvm.masked.gather.v16i16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i16>)
-declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>)
-declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>)
-declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i16>)
-declare <1 x i16> @llvm.masked.gather.v1i16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i16>)
-
-declare <64 x i8> @llvm.masked.gather.v64i8.v64p0(<64 x ptr>, i32, <64 x i1>, <64 x i8>)
-declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x i8>)
-declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i8>)
-declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>)
-declare <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>)
-declare <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i8>)
-declare <1 x i8> @llvm.masked.gather.v1i8.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i8>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/fixed-vector-scatter.ll b/llvm/test/Analysis/CostModel/RISCV/fixed-vector-scatter.ll
index 9158c2c829135..338683e12654c 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fixed-vector-scatter.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fixed-vector-scatter.ll
@@ -185,46 +185,3 @@ define i32 @masked_scatter() {
ret i32 0
}
-declare void @llvm.masked.scatter.v8f64.v8p0(<8 x double>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4f64.v4p0(<4 x double>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1f64.v1p0(<1 x double>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v16f32.v16p0(<16 x float>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2f32.v2p0(<2 x float>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1f32.v1p0(<1 x float>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v32f16.v32p0(<32 x half>, <32 x ptr>, i32, <32 x i1>)
-declare void @llvm.masked.scatter.v16f16.v16p0(<16 x half>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2f16.v2p0(<2 x half>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1f16.v1p0(<1 x half>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v8i64.v8p0(<8 x i64>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4i64.v4p0(<4 x i64>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1i64.v1p0(<1 x i64>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v16i32.v16p0(<16 x i32>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1i32.v1p0(<1 x i32>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v32i16.v32p0(<32 x i16>, <32 x ptr>, i32, <32 x i1>)
-declare void @llvm.masked.scatter.v16i16.v16p0(<16 x i16>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2i16.v2p0(<2 x i16>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1i16.v1p0(<1 x i16>, <1 x ptr>, i32, <1 x i1>)
-
-declare void @llvm.masked.scatter.v64i8.v64p0(<64 x i8>, <64 x ptr>, i32, <64 x i1>)
-declare void @llvm.masked.scatter.v32i8.v32p0(<32 x i8>, <32 x ptr>, i32, <32 x i1>)
-declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v1i8.v1p0(<1 x i8>, <1 x ptr>, i32, <1 x i1>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/gep.ll b/llvm/test/Analysis/CostModel/RISCV/gep.ll
index 99c74c4635fc7..f8c370050eaee 100644
--- a/llvm/test/Analysis/CostModel/RISCV/gep.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/gep.ll
@@ -396,14 +396,3 @@ define void @foldable_vector_uses(ptr %base, <2 x ptr> %base.vec) {
ret void
}
-declare <2 x i8> @llvm.masked.load.v2i8.p0(ptr, i32, <2 x i1>, <2 x i8>)
-declare <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i8>)
-declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
-declare <2 x i8> @llvm.vp.load.v2i8.p0(ptr, <2 x i1>, i32)
-declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.i64(ptr, i64, <2 x i1>, i32)
-
-declare void @llvm.masked.store.v2i8.p0(<2 x i8>, ptr, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, i32, <2 x i1>)
-declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
-declare void @llvm.vp.store.v2i8.p0(<2 x i8>, ptr, <2 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v2i8.i64(<2 x i8>, ptr, i64, <2 x i1>, i32)
diff --git a/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll b/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
index 5f2728f93d551..fe24594d806af 100644
--- a/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
@@ -891,340 +891,3 @@ define void @vp_cttz() {
ret void
}
-declare i16 @llvm.bswap.i16(i16)
-declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>)
-declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
-declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
-declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
-declare <vscale x 1 x i16> @llvm.bswap.nxv1i16(<vscale x 1 x i16>)
-declare <vscale x 2 x i16> @llvm.bswap.nxv2i16(<vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.bswap.nxv4i16(<vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.bswap.nxv16i16(<vscale x 16 x i16>)
-declare i32 @llvm.bswap.i32(i32)
-declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
-declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
-declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>)
-declare <16 x i32> @llvm.bswap.v16i32(<16 x i32>)
-declare <vscale x 1 x i32> @llvm.bswap.nxv1i32(<vscale x 1 x i32>)
-declare <vscale x 2 x i32> @llvm.bswap.nxv2i32(<vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.bswap.nxv8i32(<vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.bswap.nxv16i32(<vscale x 16 x i32>)
-declare i64 @llvm.bswap.i64(i64)
-declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
-declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>)
-declare <8 x i64> @llvm.bswap.v8i64(<8 x i64>)
-declare <16 x i64> @llvm.bswap.v16i64(<16 x i64>)
-declare <vscale x 1 x i64> @llvm.bswap.nxv1i64(<vscale x 1 x i64>)
-declare <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.bswap.nxv4i64(<vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.bswap.nxv8i64(<vscale x 8 x i64>)
-declare <vscale x 16 x i64> @llvm.bswap.nxv16i64(<vscale x 16 x i64>)
-
-declare i8 @llvm.bitreverse.i8(i8)
-declare <2 x i8> @llvm.bitreverse.v2i8(<2 x i8>)
-declare <4 x i8> @llvm.bitreverse.v4i8(<4 x i8>)
-declare <8 x i8> @llvm.bitreverse.v8i8(<8 x i8>)
-declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>)
-declare <vscale x 1 x i8> @llvm.bitreverse.nxv1i8(<vscale x 1 x i8>)
-declare <vscale x 2 x i8> @llvm.bitreverse.nxv2i8(<vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.bitreverse.nxv4i8(<vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.bitreverse.nxv8i8(<vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8>)
-declare i16 @llvm.bitreverse.i16(i16)
-declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>)
-declare <4 x i16> @llvm.bitreverse.v4i16(<4 x i16>)
-declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)
-declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>)
-declare <vscale x 1 x i16> @llvm.bitreverse.nxv1i16(<vscale x 1 x i16>)
-declare <vscale x 2 x i16> @llvm.bitreverse.nxv2i16(<vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.bitreverse.nxv4i16(<vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.bitreverse.nxv16i16(<vscale x 16 x i16>)
-declare i32 @llvm.bitreverse.i32(i32)
-declare <2 x i32> @llvm.bitreverse.v2i32(<2 x i32>)
-declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>)
-declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>)
-declare <16 x i32> @llvm.bitreverse.v16i32(<16 x i32>)
-declare <vscale x 1 x i32> @llvm.bitreverse.nxv1i32(<vscale x 1 x i32>)
-declare <vscale x 2 x i32> @llvm.bitreverse.nxv2i32(<vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.bitreverse.nxv8i32(<vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.bitreverse.nxv16i32(<vscale x 16 x i32>)
-declare i64 @llvm.bitreverse.i64(i64)
-declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>)
-declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>)
-declare <8 x i64> @llvm.bitreverse.v8i64(<8 x i64>)
-declare <16 x i64> @llvm.bitreverse.v16i64(<16 x i64>)
-declare <vscale x 1 x i64> @llvm.bitreverse.nxv1i64(<vscale x 1 x i64>)
-declare <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.bitreverse.nxv4i64(<vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.bitreverse.nxv8i64(<vscale x 8 x i64>)
-declare <vscale x 16 x i64> @llvm.bitreverse.nxv16i64(<vscale x 16 x i64>)
-
-declare i8 @llvm.ctpop.i8(i8)
-declare <2 x i8> @llvm.ctpop.v2i8(<2 x i8>)
-declare <4 x i8> @llvm.ctpop.v4i8(<4 x i8>)
-declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>)
-declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)
-declare <vscale x 1 x i8> @llvm.ctpop.nxv1i8(<vscale x 1 x i8>)
-declare <vscale x 2 x i8> @llvm.ctpop.nxv2i8(<vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.ctpop.nxv4i8(<vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.ctpop.nxv8i8(<vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.ctpop.nxv16i8(<vscale x 16 x i8>)
-declare i16 @llvm.ctpop.i16(i16)
-declare <2 x i16> @llvm.ctpop.v2i16(<2 x i16>)
-declare <4 x i16> @llvm.ctpop.v4i16(<4 x i16>)
-declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>)
-declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
-declare <vscale x 1 x i16> @llvm.ctpop.nxv1i16(<vscale x 1 x i16>)
-declare <vscale x 2 x i16> @llvm.ctpop.nxv2i16(<vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.ctpop.nxv4i16(<vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.ctpop.nxv8i16(<vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.ctpop.nxv16i16(<vscale x 16 x i16>)
-declare i32 @llvm.ctpop.i32(i32)
-declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
-declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
-declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
-declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>)
-declare <vscale x 1 x i32> @llvm.ctpop.nxv1i32(<vscale x 1 x i32>)
-declare <vscale x 2 x i32> @llvm.ctpop.nxv2i32(<vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.ctpop.nxv4i32(<vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.ctpop.nxv8i32(<vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32>)
-declare i64 @llvm.ctpop.i64(i64)
-declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
-declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
-declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>)
-declare <16 x i64> @llvm.ctpop.v16i64(<16 x i64>)
-declare <vscale x 1 x i64> @llvm.ctpop.nxv1i64(<vscale x 1 x i64>)
-declare <vscale x 2 x i64> @llvm.ctpop.nxv2i64(<vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.ctpop.nxv4i64(<vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64>)
-declare <vscale x 16 x i64> @llvm.ctpop.nxv16i64(<vscale x 16 x i64>)
-
-declare <2 x i16> @llvm.vp.bswap.v2i16(<2 x i16>, <2 x i1>, i32)
-declare <4 x i16> @llvm.vp.bswap.v4i16(<4 x i16>, <4 x i1>, i32)
-declare <8 x i16> @llvm.vp.bswap.v8i16(<8 x i16>, <8 x i1>, i32)
-declare <16 x i16> @llvm.vp.bswap.v16i16(<16 x i16>, <16 x i1>, i32)
-declare <vscale x 1 x i16> @llvm.vp.bswap.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i16> @llvm.vp.bswap.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i16> @llvm.vp.bswap.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i16> @llvm.vp.bswap.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i16> @llvm.vp.bswap.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i1>, i32)
-declare <2 x i32> @llvm.vp.bswap.v2i32(<2 x i32>, <2 x i1>, i32)
-declare <4 x i32> @llvm.vp.bswap.v4i32(<4 x i32>, <4 x i1>, i32)
-declare <8 x i32> @llvm.vp.bswap.v8i32(<8 x i32>, <8 x i1>, i32)
-declare <16 x i32> @llvm.vp.bswap.v16i32(<16 x i32>, <16 x i1>, i32)
-declare <vscale x 1 x i32> @llvm.vp.bswap.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i32> @llvm.vp.bswap.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i32> @llvm.vp.bswap.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i32> @llvm.vp.bswap.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i32> @llvm.vp.bswap.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i1>, i32)
-declare <2 x i64> @llvm.vp.bswap.v2i64(<2 x i64>, <2 x i1>, i32)
-declare <4 x i64> @llvm.vp.bswap.v4i64(<4 x i64>, <4 x i1>, i32)
-declare <8 x i64> @llvm.vp.bswap.v8i64(<8 x i64>, <8 x i1>, i32)
-declare <16 x i64> @llvm.vp.bswap.v16i64(<16 x i64>, <16 x i1>, i32)
-declare <vscale x 1 x i64> @llvm.vp.bswap.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.vp.bswap.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.vp.bswap.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.vp.bswap.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i64> @llvm.vp.bswap.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i1>, i32)
-
-declare <2 x i8> @llvm.ctlz.v2i8(<2 x i8>, i1)
-declare <4 x i8> @llvm.ctlz.v4i8(<4 x i8>, i1)
-declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>, i1)
-declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)
-declare <vscale x 1 x i8> @llvm.ctlz.nxv1i8(<vscale x 1 x i8>, i1)
-declare <vscale x 2 x i8> @llvm.ctlz.nxv2i8(<vscale x 2 x i8>, i1)
-declare <vscale x 4 x i8> @llvm.ctlz.nxv4i8(<vscale x 4 x i8>, i1)
-declare <vscale x 8 x i8> @llvm.ctlz.nxv8i8(<vscale x 8 x i8>, i1)
-declare <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8>, i1)
-declare <vscale x 32 x i8> @llvm.ctlz.nxv32i8(<vscale x 32 x i8>, i1)
-declare <vscale x 64 x i8> @llvm.ctlz.nxv64i8(<vscale x 64 x i8>, i1)
-declare <2 x i16> @llvm.ctlz.v2i16(<2 x i16>, i1)
-declare <4 x i16> @llvm.ctlz.v4i16(<4 x i16>, i1)
-declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1)
-declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1)
-declare <vscale x 1 x i16> @llvm.ctlz.nxv1i16(<vscale x 1 x i16>, i1)
-declare <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16>, i1)
-declare <vscale x 4 x i16> @llvm.ctlz.nxv4i16(<vscale x 4 x i16>, i1)
-declare <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16>, i1)
-declare <vscale x 16 x i16> @llvm.ctlz.nxv16i16(<vscale x 16 x i16>, i1)
-declare <vscale x 32 x i16> @llvm.ctlz.nxv32i16(<vscale x 32 x i16>, i1)
-declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1)
-declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
-declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1)
-declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)
-declare <vscale x 1 x i32> @llvm.ctlz.nxv1i32(<vscale x 1 x i32>, i1)
-declare <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32>, i1)
-declare <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32>, i1)
-declare <vscale x 8 x i32> @llvm.ctlz.nxv8i32(<vscale x 8 x i32>, i1)
-declare <vscale x 16 x i32> @llvm.ctlz.nxv16i32(<vscale x 16 x i32>, i1)
-declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
-declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1)
-declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1)
-declare <16 x i64> @llvm.ctlz.v16i64(<16 x i64>, i1)
-declare <vscale x 1 x i64> @llvm.ctlz.nxv1i64(<vscale x 1 x i64>, i1)
-declare <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64>, i1)
-declare <vscale x 4 x i64> @llvm.ctlz.nxv4i64(<vscale x 4 x i64>, i1)
-declare <vscale x 8 x i64> @llvm.ctlz.nxv8i64(<vscale x 8 x i64>, i1)
-declare <vscale x 16 x i64> @llvm.ctlz.nxv16i64(<vscale x 16 x i64>, i1)
-
-declare <2 x i8> @llvm.cttz.v2i8(<2 x i8>, i1)
-declare <4 x i8> @llvm.cttz.v4i8(<4 x i8>, i1)
-declare <8 x i8> @llvm.cttz.v8i8(<8 x i8>, i1)
-declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>, i1)
-declare <vscale x 1 x i8> @llvm.cttz.nxv1i8(<vscale x 1 x i8>, i1)
-declare <vscale x 2 x i8> @llvm.cttz.nxv2i8(<vscale x 2 x i8>, i1)
-declare <vscale x 4 x i8> @llvm.cttz.nxv4i8(<vscale x 4 x i8>, i1)
-declare <vscale x 8 x i8> @llvm.cttz.nxv8i8(<vscale x 8 x i8>, i1)
-declare <vscale x 16 x i8> @llvm.cttz.nxv16i8(<vscale x 16 x i8>, i1)
-declare <vscale x 32 x i8> @llvm.cttz.nxv32i8(<vscale x 32 x i8>, i1)
-declare <vscale x 64 x i8> @llvm.cttz.nxv64i8(<vscale x 64 x i8>, i1)
-declare <2 x i16> @llvm.cttz.v2i16(<2 x i16>, i1)
-declare <4 x i16> @llvm.cttz.v4i16(<4 x i16>, i1)
-declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>, i1)
-declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>, i1)
-declare <vscale x 1 x i16> @llvm.cttz.nxv1i16(<vscale x 1 x i16>, i1)
-declare <vscale x 2 x i16> @llvm.cttz.nxv2i16(<vscale x 2 x i16>, i1)
-declare <vscale x 4 x i16> @llvm.cttz.nxv4i16(<vscale x 4 x i16>, i1)
-declare <vscale x 8 x i16> @llvm.cttz.nxv8i16(<vscale x 8 x i16>, i1)
-declare <vscale x 16 x i16> @llvm.cttz.nxv16i16(<vscale x 16 x i16>, i1)
-declare <vscale x 32 x i16> @llvm.cttz.nxv32i16(<vscale x 32 x i16>, i1)
-declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>, i1)
-declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1)
-declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1)
-declare <16 x i32> @llvm.cttz.v16i32(<16 x i32>, i1)
-declare <vscale x 1 x i32> @llvm.cttz.nxv1i32(<vscale x 1 x i32>, i1)
-declare <vscale x 2 x i32> @llvm.cttz.nxv2i32(<vscale x 2 x i32>, i1)
-declare <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32>, i1)
-declare <vscale x 8 x i32> @llvm.cttz.nxv8i32(<vscale x 8 x i32>, i1)
-declare <vscale x 16 x i32> @llvm.cttz.nxv16i32(<vscale x 16 x i32>, i1)
-declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
-declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>, i1)
-declare <8 x i64> @llvm.cttz.v8i64(<8 x i64>, i1)
-declare <16 x i64> @llvm.cttz.v16i64(<16 x i64>, i1)
-declare <vscale x 1 x i64> @llvm.cttz.nxv1i64(<vscale x 1 x i64>, i1)
-declare <vscale x 2 x i64> @llvm.cttz.nxv2i64(<vscale x 2 x i64>, i1)
-declare <vscale x 4 x i64> @llvm.cttz.nxv4i64(<vscale x 4 x i64>, i1)
-declare <vscale x 8 x i64> @llvm.cttz.nxv8i64(<vscale x 8 x i64>, i1)
-declare <vscale x 16 x i64> @llvm.cttz.nxv16i64(<vscale x 16 x i64>, i1)
-
-declare <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8>, <2 x i1>, i32)
-declare <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8>, <4 x i1>, i32)
-declare <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8>, <8 x i1>, i32)
-declare <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8>, <16 x i1>, i32)
-declare <vscale x 1 x i8> @llvm.vp.ctpop.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.vp.ctpop.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i8> @llvm.vp.ctpop.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i8> @llvm.vp.ctpop.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i8> @llvm.vp.ctpop.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i32)
-declare <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16>, <2 x i1>, i32)
-declare <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16>, <4 x i1>, i32)
-declare <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16>, <8 x i1>, i32)
-declare <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16>, <16 x i1>, i32)
-declare <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i1>, i32)
-declare <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32>, <2 x i1>, i32)
-declare <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32>, <4 x i1>, i32)
-declare <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32>, <8 x i1>, i32)
-declare <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32>, <16 x i1>, i32)
-declare <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i1>, i32)
-declare <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64>, <2 x i1>, i32)
-declare <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64>, <4 x i1>, i32)
-declare <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64>, <8 x i1>, i32)
-declare <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64>, <16 x i1>, i32)
-declare <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i1>, i32)
-
-declare <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32)
-declare <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8>, i1 immarg, <4 x i1>, i32)
-declare <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32)
-declare <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32)
-declare <vscale x 1 x i8> @llvm.vp.ctlz.nxv1i8(<vscale x 1 x i8>, i1 immarg, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.vp.ctlz.nxv2i8(<vscale x 2 x i8>, i1 immarg, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i8> @llvm.vp.ctlz.nxv4i8(<vscale x 4 x i8>, i1 immarg, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i8> @llvm.vp.ctlz.nxv8i8(<vscale x 8 x i8>, i1 immarg, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i8> @llvm.vp.ctlz.nxv16i8(<vscale x 16 x i8>, i1 immarg, <vscale x 16 x i1>, i32)
-declare <vscale x 32 x i8> @llvm.vp.ctlz.nxv32i8(<vscale x 32 x i8>, i1 immarg, <vscale x 32 x i1>, i32)
-declare <vscale x 64 x i8> @llvm.vp.ctlz.nxv64i8(<vscale x 64 x i8>, i1 immarg, <vscale x 64 x i1>, i32)
-declare <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32)
-declare <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32)
-declare <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32)
-declare <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32)
-declare <vscale x 1 x i16> @llvm.vp.ctlz.nxv1i16(<vscale x 1 x i16>, i1 immarg, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i16> @llvm.vp.ctlz.nxv2i16(<vscale x 2 x i16>, i1 immarg, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i16> @llvm.vp.ctlz.nxv4i16(<vscale x 4 x i16>, i1 immarg, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i16> @llvm.vp.ctlz.nxv8i16(<vscale x 8 x i16>, i1 immarg, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i16> @llvm.vp.ctlz.nxv16i16(<vscale x 16 x i16>, i1 immarg, <vscale x 16 x i1>, i32)
-declare <vscale x 32 x i16> @llvm.vp.ctlz.nxv32i16(<vscale x 32 x i16>, i1 immarg, <vscale x 32 x i1>, i32)
-declare <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32)
-declare <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32)
-declare <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32)
-declare <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32>, i1 immarg, <16 x i1>, i32)
-declare <vscale x 1 x i32> @llvm.vp.ctlz.nxv1i32(<vscale x 1 x i32>, i1 immarg, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i32> @llvm.vp.ctlz.nxv2i32(<vscale x 2 x i32>, i1 immarg, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i32> @llvm.vp.ctlz.nxv4i32(<vscale x 4 x i32>, i1 immarg, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i32> @llvm.vp.ctlz.nxv8i32(<vscale x 8 x i32>, i1 immarg, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i32> @llvm.vp.ctlz.nxv16i32(<vscale x 16 x i32>, i1 immarg, <vscale x 16 x i1>, i32)
-declare <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32)
-declare <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32)
-declare <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32)
-declare <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32)
-declare <vscale x 1 x i64> @llvm.vp.ctlz.nxv1i64(<vscale x 1 x i64>, i1 immarg, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.vp.ctlz.nxv2i64(<vscale x 2 x i64>, i1 immarg, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.vp.ctlz.nxv4i64(<vscale x 4 x i64>, i1 immarg, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.vp.ctlz.nxv8i64(<vscale x 8 x i64>, i1 immarg, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64>, i1 immarg, <vscale x 16 x i1>, i32)
-
-declare <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32)
-declare <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8>, i1 immarg, <4 x i1>, i32)
-declare <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32)
-declare <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32)
-declare <vscale x 1 x i8> @llvm.vp.cttz.nxv1i8(<vscale x 1 x i8>, i1 immarg, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.vp.cttz.nxv2i8(<vscale x 2 x i8>, i1 immarg, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i8> @llvm.vp.cttz.nxv4i8(<vscale x 4 x i8>, i1 immarg, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i8> @llvm.vp.cttz.nxv8i8(<vscale x 8 x i8>, i1 immarg, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i8> @llvm.vp.cttz.nxv16i8(<vscale x 16 x i8>, i1 immarg, <vscale x 16 x i1>, i32)
-declare <vscale x 32 x i8> @llvm.vp.cttz.nxv32i8(<vscale x 32 x i8>, i1 immarg, <vscale x 32 x i1>, i32)
-declare <vscale x 64 x i8> @llvm.vp.cttz.nxv64i8(<vscale x 64 x i8>, i1 immarg, <vscale x 64 x i1>, i32)
-declare <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32)
-declare <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32)
-declare <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32)
-declare <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32)
-declare <vscale x 1 x i16> @llvm.vp.cttz.nxv1i16(<vscale x 1 x i16>, i1 immarg, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i16> @llvm.vp.cttz.nxv2i16(<vscale x 2 x i16>, i1 immarg, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i16> @llvm.vp.cttz.nxv4i16(<vscale x 4 x i16>, i1 immarg, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i16> @llvm.vp.cttz.nxv8i16(<vscale x 8 x i16>, i1 immarg, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i16> @llvm.vp.cttz.nxv16i16(<vscale x 16 x i16>, i1 immarg, <vscale x 16 x i1>, i32)
-declare <vscale x 32 x i16> @llvm.vp.cttz.nxv32i16(<vscale x 32 x i16>, i1 immarg, <vscale x 32 x i1>, i32)
-declare <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32)
-declare <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32)
-declare <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32)
-declare <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32>, i1 immarg, <16 x i1>, i32)
-declare <vscale x 1 x i32> @llvm.vp.cttz.nxv1i32(<vscale x 1 x i32>, i1 immarg, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i32> @llvm.vp.cttz.nxv2i32(<vscale x 2 x i32>, i1 immarg, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i32> @llvm.vp.cttz.nxv4i32(<vscale x 4 x i32>, i1 immarg, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i32> @llvm.vp.cttz.nxv8i32(<vscale x 8 x i32>, i1 immarg, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i32> @llvm.vp.cttz.nxv16i32(<vscale x 16 x i32>, i1 immarg, <vscale x 16 x i1>, i32)
-declare <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32)
-declare <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32)
-declare <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32)
-declare <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32)
-declare <vscale x 1 x i64> @llvm.vp.cttz.nxv1i64(<vscale x 1 x i64>, i1 immarg, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.vp.cttz.nxv2i64(<vscale x 2 x i64>, i1 immarg, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.vp.cttz.nxv4i64(<vscale x 4 x i64>, i1 immarg, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.vp.cttz.nxv8i64(<vscale x 8 x i64>, i1 immarg, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64>, i1 immarg, <vscale x 16 x i1>, i32)
diff --git a/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll b/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
index 10474d227851f..3be36f13e8731 100644
--- a/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
@@ -341,162 +341,3 @@ define void @umin() {
ret void
}
-declare i8 @llvm.smax.i8(i8, i8)
-declare <2 x i8> @llvm.smax.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.smax.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.smax.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 1 x i8> @llvm.smax.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
-declare <vscale x 2 x i8> @llvm.smax.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.smax.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.smax.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.smax.i16(i16, i16)
-declare <2 x i16> @llvm.smax.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.smax.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 1 x i16> @llvm.smax.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
-declare <vscale x 2 x i16> @llvm.smax.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.smax.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.smax.i32(i32, i32)
-declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.smax.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 1 x i32> @llvm.smax.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
-declare <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.smax.i64(i64, i64)
-declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.smax.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.smax.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 1 x i64> @llvm.smax.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
-declare <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
-
-declare i8 @llvm.smin.i8(i8, i8)
-declare <2 x i8> @llvm.smin.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.smin.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.smin.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 1 x i8> @llvm.smin.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
-declare <vscale x 2 x i8> @llvm.smin.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.smin.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.smin.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.smin.i16(i16, i16)
-declare <2 x i16> @llvm.smin.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.smin.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 1 x i16> @llvm.smin.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
-declare <vscale x 2 x i16> @llvm.smin.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.smin.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.smin.i32(i32, i32)
-declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.smin.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 1 x i32> @llvm.smin.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
-declare <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.smin.i64(i64, i64)
-declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.smin.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.smin.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 1 x i64> @llvm.smin.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
-declare <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
-
-declare i8 @llvm.umax.i8(i8, i8)
-declare <2 x i8> @llvm.umax.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.umax.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.umax.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 1 x i8> @llvm.umax.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
-declare <vscale x 2 x i8> @llvm.umax.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.umax.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.umax.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.umax.i16(i16, i16)
-declare <2 x i16> @llvm.umax.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.umax.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 1 x i16> @llvm.umax.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
-declare <vscale x 2 x i16> @llvm.umax.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.umax.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.umax.i32(i32, i32)
-declare <2 x i32> @llvm.umax.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.umax.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 1 x i32> @llvm.umax.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
-declare <vscale x 2 x i32> @llvm.umax.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.umax.i64(i64, i64)
-declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.umax.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.umax.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 1 x i64> @llvm.umax.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
-declare <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
-
-declare i8 @llvm.umin.i8(i8, i8)
-declare <2 x i8> @llvm.umin.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.umin.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.umin.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 1 x i8> @llvm.umin.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
-declare <vscale x 2 x i8> @llvm.umin.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.umin.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.umin.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.umin.i16(i16, i16)
-declare <2 x i16> @llvm.umin.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.umin.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 1 x i16> @llvm.umin.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
-declare <vscale x 2 x i16> @llvm.umin.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.umin.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.umin.i32(i32, i32)
-declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.umin.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 1 x i32> @llvm.umin.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
-declare <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.umin.i64(i64, i64)
-declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.umin.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.umin.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 1 x i64> @llvm.umin.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
-declare <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll b/llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll
index 0758eb204be48..20c6aef3c8e45 100644
--- a/llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll
@@ -462,219 +462,3 @@ define void @sshl.sat() {
call <vscale x 8 x i64> @llvm.sshl.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
ret void
}
-
-declare i8 @llvm.sadd.sat.i8(i8, i8)
-declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.sadd.sat.i16(i16, i16)
-declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.sadd.sat.i32(i32, i32)
-declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.sadd.sat.i64(i64, i64)
-declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
-
-declare i8 @llvm.uadd.sat.i8(i8, i8)
-declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.uadd.sat.i16(i16, i16)
-declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.uadd.sat.i32(i32, i32)
-declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.uadd.sat.i64(i64, i64)
-declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
-
-declare i8 @llvm.usub.sat.i8(i8, i8)
-declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.usub.sat.i16(i16, i16)
-declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.usub.sat.i32(i32, i32)
-declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.usub.sat.i64(i64, i64)
-declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.usub.sat.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
-
-declare i8 @llvm.ssub.sat.i8(i8, i8)
-declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.ssub.sat.i16(i16, i16)
-declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.ssub.sat.i32(i32, i32)
-declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.ssub.sat.i64(i64, i64)
-declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
-
-declare i8 @llvm.ushl.sat.i8(i8, i8)
-declare <2 x i8> @llvm.ushl.sat.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.ushl.sat.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.ushl.sat.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 2 x i8> @llvm.ushl.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.ushl.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.ushl.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.ushl.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.ushl.sat.i16(i16, i16)
-declare <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.ushl.sat.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.ushl.sat.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 2 x i16> @llvm.ushl.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.ushl.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.ushl.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.ushl.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.ushl.sat.i32(i32, i32)
-declare <2 x i32> @llvm.ushl.sat.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.ushl.sat.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.ushl.sat.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 2 x i32> @llvm.ushl.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.ushl.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.ushl.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.ushl.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.ushl.sat.i64(i64, i64)
-declare <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.ushl.sat.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.ushl.sat.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.ushl.sat.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 2 x i64> @llvm.ushl.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.ushl.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.ushl.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
-
-declare i8 @llvm.sshl.sat.i8(i8, i8)
-declare <2 x i8> @llvm.sshl.sat.v2i8(<2 x i8>, <2 x i8>)
-declare <4 x i8> @llvm.sshl.sat.v4i8(<4 x i8>, <4 x i8>)
-declare <8 x i8> @llvm.sshl.sat.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8>, <16 x i8>)
-declare <vscale x 2 x i8> @llvm.sshl.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.sshl.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.sshl.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.sshl.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare i16 @llvm.sshl.sat.i16(i16, i16)
-declare <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16>, <2 x i16>)
-declare <4 x i16> @llvm.sshl.sat.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16>, <8 x i16>)
-declare <16 x i16> @llvm.sshl.sat.v16i16(<16 x i16>, <16 x i16>)
-declare <vscale x 2 x i16> @llvm.sshl.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.sshl.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.sshl.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.sshl.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
-declare i32 @llvm.sshl.sat.i32(i32, i32)
-declare <2 x i32> @llvm.sshl.sat.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.sshl.sat.v8i32(<8 x i32>, <8 x i32>)
-declare <16 x i32> @llvm.sshl.sat.v16i32(<16 x i32>, <16 x i32>)
-declare <vscale x 2 x i32> @llvm.sshl.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.sshl.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.sshl.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 16 x i32> @llvm.sshl.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
-declare i64 @llvm.sshl.sat.i64(i64, i64)
-declare <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.sshl.sat.v4i64(<4 x i64>, <4 x i64>)
-declare <8 x i64> @llvm.sshl.sat.v8i64(<8 x i64>, <8 x i64>)
-declare <16 x i64> @llvm.sshl.sat.v16i64(<16 x i64>, <16 x i64>)
-declare <vscale x 2 x i64> @llvm.sshl.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.sshl.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.sshl.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
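The saturating-math declares removed above are likewise recoverable from the call sites alone, since each overload is named by its operand type. A minimal sketch, assuming a recent opt; the function name is illustrative:

define i8 @sadd_sat_example(i8 %a, i8 %b) {
  ; @llvm.sadd.sat.i8 needs no explicit declare; both operands and the
  ; result share the same integer type.
  %r = call i8 @llvm.sadd.sat.i8(i8 %a, i8 %b)
  ret i8 %r
}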
diff --git a/llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll b/llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll
index e6f53d4429c79..892277a2d5740 100644
--- a/llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll
@@ -101,43 +101,3 @@ entry:
ret void
}
-
-declare <2 x i8> @llvm.masked.load.v2i8.p0(ptr, i32, <2 x i1>, <2 x i8>)
-declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32, <16 x i1>, <16 x i8>)
-declare <2 x i16> @llvm.masked.load.v2i16.p0(ptr, i32, <2 x i1>, <2 x i16>)
-declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32, <4 x i1>, <4 x i16>)
-declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32, <8 x i1>, <8 x i16>)
-declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)
-declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)
-declare <2 x i64> @llvm.masked.load.v2i64.p0(ptr, i32, <2 x i1>, <2 x i64>)
-declare <4 x i64> @llvm.masked.load.v4i64.p0(ptr, i32, <4 x i1>, <4 x i64>)
-declare <2 x half> @llvm.masked.load.v2f16.p0(ptr, i32, <2 x i1>, <2 x half>)
-declare <4 x half> @llvm.masked.load.v4f16.p0(ptr, i32, <4 x i1>, <4 x half>)
-declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32, <8 x i1>, <8 x half>)
-declare <32 x half> @llvm.masked.load.v32f16.p0(ptr, i32, <32 x i1>, <32 x half>)
-declare <2 x float> @llvm.masked.load.v2f32.p0(ptr, i32, <2 x i1>, <2 x float>)
-declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32, <4 x i1>, <4 x float>)
-declare <2 x double> @llvm.masked.load.v2f64.p0(ptr, i32, <2 x i1>, <2 x double>)
-
-
-declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)
-declare <vscale x 1 x i64> @llvm.masked.load.nxv1i64.p0(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)
-declare <vscale x 2 x half> @llvm.masked.load.nxv2f16.p0(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 4 x half> @llvm.masked.load.nxv4f16.p0(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
-declare <vscale x 32 x half> @llvm.masked.load.nxv32f16.p0(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x half>)
-declare <vscale x 2 x float> @llvm.masked.load.nxv2f32.p0(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
-declare <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
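Masked loads carry more operands than the plain intrinsics, but the same on-demand declaration applies. A minimal sketch (names are illustrative): the operands are the pointer, an immediate alignment, the lane mask, and the passthru value merged into masked-off lanes:

define <4 x i32> @masked_load_example(ptr %p, <4 x i1> %m, <4 x i32> %passthru) {
  ; The alignment operand (4) must be a constant; no declare of
  ; @llvm.masked.load.v4i32.p0 is needed.
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4, <4 x i1> %m, <4 x i32> %passthru)
  ret <4 x i32> %v
}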
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-add.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-add.ll
index 6032ae01aa718..23ee689937de1 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-add.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-add.ll
@@ -173,44 +173,3 @@ define i32 @reduce_i64(i32 %arg) {
%V128 = call i64 @llvm.vector.reduce.add.v128i64(<128 x i64> undef)
ret i32 undef
}
-
-declare i1 @llvm.vector.reduce.add.v1i1(<1 x i1>)
-declare i1 @llvm.vector.reduce.add.v2i1(<2 x i1>)
-declare i1 @llvm.vector.reduce.add.v4i1(<4 x i1>)
-declare i1 @llvm.vector.reduce.add.v8i1(<8 x i1>)
-declare i1 @llvm.vector.reduce.add.v16i1(<16 x i1>)
-declare i1 @llvm.vector.reduce.add.v32i1(<32 x i1>)
-declare i1 @llvm.vector.reduce.add.v64i1(<64 x i1>)
-declare i1 @llvm.vector.reduce.add.v128i1(<128 x i1>)
-declare i8 @llvm.vector.reduce.add.v1i8(<1 x i8>)
-declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)
-declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>)
-declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
-declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
-declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>)
-declare i8 @llvm.vector.reduce.add.v64i8(<64 x i8>)
-declare i8 @llvm.vector.reduce.add.v128i8(<128 x i8>)
-declare i16 @llvm.vector.reduce.add.v1i16(<1 x i16>)
-declare i16 @llvm.vector.reduce.add.v2i16(<2 x i16>)
-declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
-declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
-declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
-declare i16 @llvm.vector.reduce.add.v32i16(<32 x i16>)
-declare i16 @llvm.vector.reduce.add.v64i16(<64 x i16>)
-declare i16 @llvm.vector.reduce.add.v128i16(<128 x i16>)
-declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32>)
-declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>)
-declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
-declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
-declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>)
-declare i32 @llvm.vector.reduce.add.v64i32(<64 x i32>)
-declare i32 @llvm.vector.reduce.add.v128i32(<128 x i32>)
-declare i64 @llvm.vector.reduce.add.v1i64(<1 x i64>)
-declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
-declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
-declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
-declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
-declare i64 @llvm.vector.reduce.add.v32i64(<32 x i64>)
-declare i64 @llvm.vector.reduce.add.v64i64(<64 x i64>)
-declare i64 @llvm.vector.reduce.add.v128i64(<128 x i64>)
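Reductions follow the same one-overload-per-type pattern that made these declare blocks so long; every overload is inferable from its call. A minimal sketch with an illustrative function name:

define i32 @reduce_add_example(<8 x i32> %v) {
  ; Horizontal sum of all lanes; the v8i32 overload is declared on demand.
  %s = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %v)
  ret i32 %s
}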
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-and.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-and.ll
index dc6a582df133b..eabbe1d43f117 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-and.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-and.ll
@@ -216,47 +216,3 @@ define i32 @reduce_i64(i32 %arg) {
%V128 = call i64 @llvm.vector.reduce.and.v128i64(<128 x i64> undef)
ret i32 undef
}
-
-declare i1 @llvm.vector.reduce.and.v1i1(<1 x i1>)
-declare i1 @llvm.vector.reduce.and.v2i1(<2 x i1>)
-declare i1 @llvm.vector.reduce.and.v4i1(<4 x i1>)
-declare i1 @llvm.vector.reduce.and.v8i1(<8 x i1>)
-declare i1 @llvm.vector.reduce.and.v16i1(<16 x i1>)
-declare i1 @llvm.vector.reduce.and.v32i1(<32 x i1>)
-declare i1 @llvm.vector.reduce.and.v64i1(<64 x i1>)
-declare i1 @llvm.vector.reduce.and.v128i1(<128 x i1>)
-declare i1 @llvm.vector.reduce.and.v256i1(<256 x i1>)
-declare i1 @llvm.vector.reduce.and.v512i1(<512 x i1>)
-declare i1 @llvm.vector.reduce.and.v1024i1(<1024 x i1>)
-declare i8 @llvm.vector.reduce.and.v1i8(<1 x i8>)
-declare i8 @llvm.vector.reduce.and.v2i8(<2 x i8>)
-declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>)
-declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>)
-declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)
-declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>)
-declare i8 @llvm.vector.reduce.and.v64i8(<64 x i8>)
-declare i8 @llvm.vector.reduce.and.v128i8(<128 x i8>)
-declare i16 @llvm.vector.reduce.and.v1i16(<1 x i16>)
-declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>)
-declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>)
-declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>)
-declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)
-declare i16 @llvm.vector.reduce.and.v32i16(<32 x i16>)
-declare i16 @llvm.vector.reduce.and.v64i16(<64 x i16>)
-declare i16 @llvm.vector.reduce.and.v128i16(<128 x i16>)
-declare i32 @llvm.vector.reduce.and.v1i32(<1 x i32>)
-declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>)
-declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>)
-declare i32 @llvm.vector.reduce.and.v16i32(<16 x i32>)
-declare i32 @llvm.vector.reduce.and.v32i32(<32 x i32>)
-declare i32 @llvm.vector.reduce.and.v64i32(<64 x i32>)
-declare i32 @llvm.vector.reduce.and.v128i32(<128 x i32>)
-declare i64 @llvm.vector.reduce.and.v1i64(<1 x i64>)
-declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>)
-declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>)
-declare i64 @llvm.vector.reduce.and.v8i64(<8 x i64>)
-declare i64 @llvm.vector.reduce.and.v16i64(<16 x i64>)
-declare i64 @llvm.vector.reduce.and.v32i64(<32 x i64>)
-declare i64 @llvm.vector.reduce.and.v64i64(<64 x i64>)
-declare i64 @llvm.vector.reduce.and.v128i64(<128 x i64>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
index b14c60012077d..d937d066d6ebe 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
@@ -55,13 +55,6 @@ call fast float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
call fast float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
ret float undef
}
-declare float @llvm.vector.reduce.fmaximum.v2f32(<2 x float>)
-declare float @llvm.vector.reduce.fmaximum.v4f32(<4 x float>)
-declare float @llvm.vector.reduce.fmaximum.v8f32(<8 x float>)
-declare float @llvm.vector.reduce.fmaximum.v16f32(<16 x float>)
-declare float @llvm.vector.reduce.fmaximum.v32f32(<32 x float>)
-declare float @llvm.vector.reduce.fmaximum.v64f32(<64 x float>)
-declare float @llvm.vector.reduce.fmaximum.v128f32(<128 x float>)
define double @reduce_fmaximum_f64(double %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f64'
@@ -108,9 +101,3 @@ call fast double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
call fast double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
ret double undef
}
-declare double @llvm.vector.reduce.fmaximum.v2f64(<2 x double>)
-declare double @llvm.vector.reduce.fmaximum.v4f64(<4 x double>)
-declare double @llvm.vector.reduce.fmaximum.v8f64(<8 x double>)
-declare double @llvm.vector.reduce.fmaximum.v16f64(<16 x double>)
-declare double @llvm.vector.reduce.fmaximum.v32f64(<32 x double>)
-declare double @llvm.vector.reduce.fmaximum.v64f64(<64 x double>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
index 2172a85bc46aa..397f25c2599b1 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
@@ -34,13 +34,6 @@ define float @reduce_fmaximum_f32(float %arg) {
%V128 = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> undef)
ret float undef
}
-declare float @llvm.vector.reduce.fminimum.v2f32(<2 x float>)
-declare float @llvm.vector.reduce.fminimum.v4f32(<4 x float>)
-declare float @llvm.vector.reduce.fminimum.v8f32(<8 x float>)
-declare float @llvm.vector.reduce.fminimum.v16f32(<16 x float>)
-declare float @llvm.vector.reduce.fminimum.v32f32(<32 x float>)
-declare float @llvm.vector.reduce.fminimum.v64f32(<64 x float>)
-declare float @llvm.vector.reduce.fminimum.v128f32(<128 x float>)
define double @reduce_fmaximum_f64(double %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f64'
@@ -69,9 +62,3 @@ define double @reduce_fmaximum_f64(double %arg) {
%V64 = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> undef)
ret double undef
}
-declare double @llvm.vector.reduce.fminimum.v2f64(<2 x double>)
-declare double @llvm.vector.reduce.fminimum.v4f64(<4 x double>)
-declare double @llvm.vector.reduce.fminimum.v8f64(<8 x double>)
-declare double @llvm.vector.reduce.fminimum.v16f64(<16 x double>)
-declare double @llvm.vector.reduce.fminimum.v32f64(<32 x double>)
-declare double @llvm.vector.reduce.fminimum.v64f64(<64 x double>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-max.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-max.ll
index 5c9303af31747..1370beb880d03 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-max.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-max.ll
@@ -343,85 +343,3 @@ define i32 @reduce_smax_i64(i32 %arg) {
%V128 = call i64 @llvm.vector.reduce.smax.v128i64(<128 x i64> undef)
ret i32 undef
}
-
-
-declare i1 @llvm.vector.reduce.umax.v1i1(<1 x i1>)
-declare i1 @llvm.vector.reduce.umax.v2i1(<2 x i1>)
-declare i1 @llvm.vector.reduce.umax.v4i1(<4 x i1>)
-declare i1 @llvm.vector.reduce.umax.v8i1(<8 x i1>)
-declare i1 @llvm.vector.reduce.umax.v16i1(<16 x i1>)
-declare i1 @llvm.vector.reduce.umax.v32i1(<32 x i1>)
-declare i1 @llvm.vector.reduce.umax.v64i1(<64 x i1>)
-declare i1 @llvm.vector.reduce.umax.v128i1(<128 x i1>)
-declare i8 @llvm.vector.reduce.umax.v1i8(<1 x i8>)
-declare i8 @llvm.vector.reduce.umax.v2i8(<2 x i8>)
-declare i8 @llvm.vector.reduce.umax.v4i8(<4 x i8>)
-declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>)
-declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>)
-declare i8 @llvm.vector.reduce.umax.v32i8(<32 x i8>)
-declare i8 @llvm.vector.reduce.umax.v64i8(<64 x i8>)
-declare i8 @llvm.vector.reduce.umax.v128i8(<128 x i8>)
-declare i16 @llvm.vector.reduce.umax.v1i16(<1 x i16>)
-declare i16 @llvm.vector.reduce.umax.v2i16(<2 x i16>)
-declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>)
-declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>)
-declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>)
-declare i16 @llvm.vector.reduce.umax.v32i16(<32 x i16>)
-declare i16 @llvm.vector.reduce.umax.v64i16(<64 x i16>)
-declare i16 @llvm.vector.reduce.umax.v128i16(<128 x i16>)
-declare i32 @llvm.vector.reduce.umax.v1i32(<1 x i32>)
-declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>)
-declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>)
-declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>)
-declare i32 @llvm.vector.reduce.umax.v32i32(<32 x i32>)
-declare i32 @llvm.vector.reduce.umax.v64i32(<64 x i32>)
-declare i32 @llvm.vector.reduce.umax.v128i32(<128 x i32>)
-declare i64 @llvm.vector.reduce.umax.v1i64(<1 x i64>)
-declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>)
-declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>)
-declare i64 @llvm.vector.reduce.umax.v8i64(<8 x i64>)
-declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>)
-declare i64 @llvm.vector.reduce.umax.v32i64(<32 x i64>)
-declare i64 @llvm.vector.reduce.umax.v64i64(<64 x i64>)
-declare i64 @llvm.vector.reduce.umax.v128i64(<128 x i64>)
-declare i1 @llvm.vector.reduce.smax.v1i1(<1 x i1>)
-declare i1 @llvm.vector.reduce.smax.v2i1(<2 x i1>)
-declare i1 @llvm.vector.reduce.smax.v4i1(<4 x i1>)
-declare i1 @llvm.vector.reduce.smax.v8i1(<8 x i1>)
-declare i1 @llvm.vector.reduce.smax.v16i1(<16 x i1>)
-declare i1 @llvm.vector.reduce.smax.v32i1(<32 x i1>)
-declare i1 @llvm.vector.reduce.smax.v64i1(<64 x i1>)
-declare i1 @llvm.vector.reduce.smax.v128i1(<128 x i1>)
-declare i8 @llvm.vector.reduce.smax.v1i8(<1 x i8>)
-declare i8 @llvm.vector.reduce.smax.v2i8(<2 x i8>)
-declare i8 @llvm.vector.reduce.smax.v4i8(<4 x i8>)
-declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>)
-declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>)
-declare i8 @llvm.vector.reduce.smax.v32i8(<32 x i8>)
-declare i8 @llvm.vector.reduce.smax.v64i8(<64 x i8>)
-declare i8 @llvm.vector.reduce.smax.v128i8(<128 x i8>)
-declare i16 @llvm.vector.reduce.smax.v1i16(<1 x i16>)
-declare i16 @llvm.vector.reduce.smax.v2i16(<2 x i16>)
-declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>)
-declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)
-declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>)
-declare i16 @llvm.vector.reduce.smax.v32i16(<32 x i16>)
-declare i16 @llvm.vector.reduce.smax.v64i16(<64 x i16>)
-declare i16 @llvm.vector.reduce.smax.v128i16(<128 x i16>)
-declare i32 @llvm.vector.reduce.smax.v1i32(<1 x i32>)
-declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>)
-declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>)
-declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>)
-declare i32 @llvm.vector.reduce.smax.v32i32(<32 x i32>)
-declare i32 @llvm.vector.reduce.smax.v64i32(<64 x i32>)
-declare i32 @llvm.vector.reduce.smax.v128i32(<128 x i32>)
-declare i64 @llvm.vector.reduce.smax.v1i64(<1 x i64>)
-declare i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>)
-declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>)
-declare i64 @llvm.vector.reduce.smax.v8i64(<8 x i64>)
-declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>)
-declare i64 @llvm.vector.reduce.smax.v32i64(<32 x i64>)
-declare i64 @llvm.vector.reduce.smax.v64i64(<64 x i64>)
-declare i64 @llvm.vector.reduce.smax.v128i64(<128 x i64>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-min.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-min.ll
index 9875d3e585811..7acf76b53bb2d 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-min.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-min.ll
@@ -343,84 +343,3 @@ define i32 @reduce_smin_i64(i32 %arg) {
%V128 = call i64 @llvm.vector.reduce.smin.v128i64(<128 x i64> undef)
ret i32 undef
}
-
-declare i1 @llvm.vector.reduce.umin.v1i1(<1 x i1>)
-declare i1 @llvm.vector.reduce.umin.v2i1(<2 x i1>)
-declare i1 @llvm.vector.reduce.umin.v4i1(<4 x i1>)
-declare i1 @llvm.vector.reduce.umin.v8i1(<8 x i1>)
-declare i1 @llvm.vector.reduce.umin.v16i1(<16 x i1>)
-declare i1 @llvm.vector.reduce.umin.v32i1(<32 x i1>)
-declare i1 @llvm.vector.reduce.umin.v64i1(<64 x i1>)
-declare i1 @llvm.vector.reduce.umin.v128i1(<128 x i1>)
-declare i8 @llvm.vector.reduce.umin.v1i8(<1 x i8>)
-declare i8 @llvm.vector.reduce.umin.v2i8(<2 x i8>)
-declare i8 @llvm.vector.reduce.umin.v4i8(<4 x i8>)
-declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>)
-declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>)
-declare i8 @llvm.vector.reduce.umin.v32i8(<32 x i8>)
-declare i8 @llvm.vector.reduce.umin.v64i8(<64 x i8>)
-declare i8 @llvm.vector.reduce.umin.v128i8(<128 x i8>)
-declare i16 @llvm.vector.reduce.umin.v1i16(<1 x i16>)
-declare i16 @llvm.vector.reduce.umin.v2i16(<2 x i16>)
-declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>)
-declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>)
-declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>)
-declare i16 @llvm.vector.reduce.umin.v32i16(<32 x i16>)
-declare i16 @llvm.vector.reduce.umin.v64i16(<64 x i16>)
-declare i16 @llvm.vector.reduce.umin.v128i16(<128 x i16>)
-declare i32 @llvm.vector.reduce.umin.v1i32(<1 x i32>)
-declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>)
-declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>)
-declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>)
-declare i32 @llvm.vector.reduce.umin.v32i32(<32 x i32>)
-declare i32 @llvm.vector.reduce.umin.v64i32(<64 x i32>)
-declare i32 @llvm.vector.reduce.umin.v128i32(<128 x i32>)
-declare i64 @llvm.vector.reduce.umin.v1i64(<1 x i64>)
-declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>)
-declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>)
-declare i64 @llvm.vector.reduce.umin.v8i64(<8 x i64>)
-declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>)
-declare i64 @llvm.vector.reduce.umin.v32i64(<32 x i64>)
-declare i64 @llvm.vector.reduce.umin.v64i64(<64 x i64>)
-declare i64 @llvm.vector.reduce.umin.v128i64(<128 x i64>)
-declare i1 @llvm.vector.reduce.smin.v1i1(<1 x i1>)
-declare i1 @llvm.vector.reduce.smin.v2i1(<2 x i1>)
-declare i1 @llvm.vector.reduce.smin.v4i1(<4 x i1>)
-declare i1 @llvm.vector.reduce.smin.v8i1(<8 x i1>)
-declare i1 @llvm.vector.reduce.smin.v16i1(<16 x i1>)
-declare i1 @llvm.vector.reduce.smin.v32i1(<32 x i1>)
-declare i1 @llvm.vector.reduce.smin.v64i1(<64 x i1>)
-declare i1 @llvm.vector.reduce.smin.v128i1(<128 x i1>)
-declare i8 @llvm.vector.reduce.smin.v1i8(<1 x i8>)
-declare i8 @llvm.vector.reduce.smin.v2i8(<2 x i8>)
-declare i8 @llvm.vector.reduce.smin.v4i8(<4 x i8>)
-declare i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>)
-declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>)
-declare i8 @llvm.vector.reduce.smin.v32i8(<32 x i8>)
-declare i8 @llvm.vector.reduce.smin.v64i8(<64 x i8>)
-declare i8 @llvm.vector.reduce.smin.v128i8(<128 x i8>)
-declare i16 @llvm.vector.reduce.smin.v1i16(<1 x i16>)
-declare i16 @llvm.vector.reduce.smin.v2i16(<2 x i16>)
-declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>)
-declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>)
-declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>)
-declare i16 @llvm.vector.reduce.smin.v32i16(<32 x i16>)
-declare i16 @llvm.vector.reduce.smin.v64i16(<64 x i16>)
-declare i16 @llvm.vector.reduce.smin.v128i16(<128 x i16>)
-declare i32 @llvm.vector.reduce.smin.v1i32(<1 x i32>)
-declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>)
-declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>)
-declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>)
-declare i32 @llvm.vector.reduce.smin.v32i32(<32 x i32>)
-declare i32 @llvm.vector.reduce.smin.v64i32(<64 x i32>)
-declare i32 @llvm.vector.reduce.smin.v128i32(<128 x i32>)
-declare i64 @llvm.vector.reduce.smin.v1i64(<1 x i64>)
-declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>)
-declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>)
-declare i64 @llvm.vector.reduce.smin.v8i64(<8 x i64>)
-declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>)
-declare i64 @llvm.vector.reduce.smin.v32i64(<32 x i64>)
-declare i64 @llvm.vector.reduce.smin.v64i64(<64 x i64>)
-declare i64 @llvm.vector.reduce.smin.v128i64(<128 x i64>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-or.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-or.ll
index b0e90253b2810..db7d4ddf4c5ae 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-or.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-or.ll
@@ -182,47 +182,3 @@ define i32 @reduce_i64(i32 %arg) {
%V128 = call i64 @llvm.vector.reduce.or.v128i64(<128 x i64> undef)
ret i32 undef
}
-
-declare i1 @llvm.vector.reduce.or.v1i1(<1 x i1>)
-declare i1 @llvm.vector.reduce.or.v2i1(<2 x i1>)
-declare i1 @llvm.vector.reduce.or.v4i1(<4 x i1>)
-declare i1 @llvm.vector.reduce.or.v8i1(<8 x i1>)
-declare i1 @llvm.vector.reduce.or.v16i1(<16 x i1>)
-declare i1 @llvm.vector.reduce.or.v32i1(<32 x i1>)
-declare i1 @llvm.vector.reduce.or.v64i1(<64 x i1>)
-declare i1 @llvm.vector.reduce.or.v128i1(<128 x i1>)
-declare i1 @llvm.vector.reduce.or.v256i1(<256 x i1>)
-declare i1 @llvm.vector.reduce.or.v512i1(<512 x i1>)
-declare i1 @llvm.vector.reduce.or.v1024i1(<1024 x i1>)
-declare i8 @llvm.vector.reduce.or.v1i8(<1 x i8>)
-declare i8 @llvm.vector.reduce.or.v2i8(<2 x i8>)
-declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>)
-declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)
-declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>)
-declare i8 @llvm.vector.reduce.or.v32i8(<32 x i8>)
-declare i8 @llvm.vector.reduce.or.v64i8(<64 x i8>)
-declare i8 @llvm.vector.reduce.or.v128i8(<128 x i8>)
-declare i16 @llvm.vector.reduce.or.v1i16(<1 x i16>)
-declare i16 @llvm.vector.reduce.or.v2i16(<2 x i16>)
-declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>)
-declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>)
-declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>)
-declare i16 @llvm.vector.reduce.or.v32i16(<32 x i16>)
-declare i16 @llvm.vector.reduce.or.v64i16(<64 x i16>)
-declare i16 @llvm.vector.reduce.or.v128i16(<128 x i16>)
-declare i32 @llvm.vector.reduce.or.v1i32(<1 x i32>)
-declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>)
-declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>)
-declare i32 @llvm.vector.reduce.or.v16i32(<16 x i32>)
-declare i32 @llvm.vector.reduce.or.v32i32(<32 x i32>)
-declare i32 @llvm.vector.reduce.or.v64i32(<64 x i32>)
-declare i32 @llvm.vector.reduce.or.v128i32(<128 x i32>)
-declare i64 @llvm.vector.reduce.or.v1i64(<1 x i64>)
-declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
-declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>)
-declare i64 @llvm.vector.reduce.or.v8i64(<8 x i64>)
-declare i64 @llvm.vector.reduce.or.v16i64(<16 x i64>)
-declare i64 @llvm.vector.reduce.or.v32i64(<32 x i64>)
-declare i64 @llvm.vector.reduce.or.v64i64(<64 x i64>)
-declare i64 @llvm.vector.reduce.or.v128i64(<128 x i64>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll
index 8ae387f48ccaa..6f631b2a0aaa9 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-fp.ll
@@ -4,8 +4,6 @@
; RUN: opt < %s -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output | FileCheck %s --check-prefix=SIZE
; RUN: opt < %s -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output | FileCheck %s --check-prefix=SIZE
-declare half @llvm.vector.reduce.fadd.nxv1f16(half, <vscale x 1 x half>)
-
define half @vreduce_fadd_nxv1f16(<vscale x 1 x half> %v, half %s) {
; CHECK-LABEL: 'vreduce_fadd_nxv1f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call reassoc half @llvm.vector.reduce.fadd.nxv1f16(half %s, <vscale x 1 x half> %v)
@@ -32,8 +30,6 @@ define half @vreduce_ord_fadd_nxv1f16(<vscale x 1 x half> %v, half %s) {
ret half %red
}
-declare half @llvm.vector.reduce.fadd.nxv2f16(half, <vscale x 2 x half>)
-
define half @vreduce_fadd_nxv2f16(<vscale x 2 x half> %v, half %s) {
; CHECK-LABEL: 'vreduce_fadd_nxv2f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call reassoc half @llvm.vector.reduce.fadd.nxv2f16(half %s, <vscale x 2 x half> %v)
@@ -60,8 +56,6 @@ define half @vreduce_ord_fadd_nxv2f16(<vscale x 2 x half> %v, half %s) {
ret half %red
}
-declare half @llvm.vector.reduce.fadd.nxv4f16(half, <vscale x 4 x half>)
-
define half @vreduce_fadd_nxv4f16(<vscale x 4 x half> %v, half %s) {
; CHECK-LABEL: 'vreduce_fadd_nxv4f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call reassoc half @llvm.vector.reduce.fadd.nxv4f16(half %s, <vscale x 4 x half> %v)
@@ -88,8 +82,6 @@ define half @vreduce_ord_fadd_nxv4f16(<vscale x 4 x half> %v, half %s) {
ret half %red
}
-declare float @llvm.vector.reduce.fadd.nxv1f32(float, <vscale x 1 x float>)
-
define float @vreduce_fadd_nxv1f32(<vscale x 1 x float> %v, float %s) {
; CHECK-LABEL: 'vreduce_fadd_nxv1f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call reassoc float @llvm.vector.reduce.fadd.nxv1f32(float %s, <vscale x 1 x float> %v)
@@ -148,8 +140,6 @@ define float @vreduce_ord_fwadd_nxv1f32(<vscale x 1 x half> %v, float %s) {
ret float %red
}
-declare float @llvm.vector.reduce.fadd.nxv2f32(float, <vscale x 2 x float>)
-
define float @vreduce_fadd_nxv2f32(<vscale x 2 x float> %v, float %s) {
; CHECK-LABEL: 'vreduce_fadd_nxv2f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call reassoc float @llvm.vector.reduce.fadd.nxv2f32(float %s, <vscale x 2 x float> %v)
@@ -208,8 +198,6 @@ define float @vreduce_ord_fwadd_nxv2f32(<vscale x 2 x half> %v, float %s) {
ret float %red
}
-declare float @llvm.vector.reduce.fadd.nxv4f32(float, <vscale x 4 x float>)
-
define float @vreduce_fadd_nxv4f32(<vscale x 4 x float> %v, float %s) {
; CHECK-LABEL: 'vreduce_fadd_nxv4f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float %s, <vscale x 4 x float> %v)
@@ -268,8 +256,6 @@ define float @vreduce_ord_fwadd_nxv4f32(<vscale x 4 x half> %v, float %s) {
ret float %red
}
-declare double @llvm.vector.reduce.fadd.nxv1f64(double, <vscale x 1 x double>)
-
define double @vreduce_fadd_nxv1f64(<vscale x 1 x double> %v, double %s) {
; CHECK-LABEL: 'vreduce_fadd_nxv1f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call reassoc double @llvm.vector.reduce.fadd.nxv1f64(double %s, <vscale x 1 x double> %v)
@@ -328,8 +314,6 @@ define double @vreduce_ord_fwadd_nxv1f64(<vscale x 1 x float> %v, double %s) {
ret double %red
}
-declare double @llvm.vector.reduce.fadd.nxv2f64(double, <vscale x 2 x double>)
-
define double @vreduce_fadd_nxv2f64(<vscale x 2 x double> %v, double %s) {
; CHECK-LABEL: 'vreduce_fadd_nxv2f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call reassoc double @llvm.vector.reduce.fadd.nxv2f64(double %s, <vscale x 2 x double> %v)
@@ -388,8 +372,6 @@ define double @vreduce_ord_fwadd_nxv2f64(<vscale x 2 x float> %v, double %s) {
ret double %red
}
-declare double @llvm.vector.reduce.fadd.nxv4f64(double, <vscale x 4 x double>)
-
define double @vreduce_fadd_nxv4f64(<vscale x 4 x double> %v, double %s) {
; CHECK-LABEL: 'vreduce_fadd_nxv4f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call reassoc double @llvm.vector.reduce.fadd.nxv4f64(double %s, <vscale x 4 x double> %v)
@@ -448,8 +430,6 @@ define double @vreduce_ord_fwadd_nxv4f64(<vscale x 4 x float> %v, double %s) {
ret double %red
}
-declare half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half>)
-
define half @vreduce_fmin_nxv1f16(<vscale x 1 x half> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv1f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv1f16(<vscale x 1 x half> %v)
@@ -489,8 +469,6 @@ define half @vreduce_fmin_nxv1f16_nonans_noinfs(<vscale x 1 x half> %v) #1 {
ret half %red
}
-declare half @llvm.vector.reduce.fmin.nxv2f16(<vscale x 2 x half>)
-
define half @vreduce_fmin_nxv2f16(<vscale x 2 x half> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv2f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv2f16(<vscale x 2 x half> %v)
@@ -504,8 +482,6 @@ define half @vreduce_fmin_nxv2f16(<vscale x 2 x half> %v) {
ret half %red
}
-declare half @llvm.vector.reduce.fmin.nxv4f16(<vscale x 4 x half>)
-
define half @vreduce_fmin_nxv4f16(<vscale x 4 x half> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv4f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv4f16(<vscale x 4 x half> %v)
@@ -519,8 +495,6 @@ define half @vreduce_fmin_nxv4f16(<vscale x 4 x half> %v) {
ret half %red
}
-declare half @llvm.vector.reduce.fmin.nxv64f16(<vscale x 64 x half>)
-
define half @vreduce_fmin_nxv64f16(<vscale x 64 x half> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv64f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %red = call half @llvm.vector.reduce.fmin.nxv64f16(<vscale x 64 x half> %v)
@@ -534,8 +508,6 @@ define half @vreduce_fmin_nxv64f16(<vscale x 64 x half> %v) {
ret half %red
}
-declare float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float>)
-
define float @vreduce_fmin_nxv1f32(<vscale x 1 x float> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv1f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv1f32(<vscale x 1 x float> %v)
@@ -575,8 +547,6 @@ define float @vreduce_fmin_nxv1f32_nonans_noinfs(<vscale x 1 x float> %v) {
ret float %red
}
-declare float @llvm.vector.reduce.fmin.nxv2f32(<vscale x 2 x float>)
-
define float @vreduce_fmin_nxv2f32(<vscale x 2 x float> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv2f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv2f32(<vscale x 2 x float> %v)
@@ -590,8 +560,6 @@ define float @vreduce_fmin_nxv2f32(<vscale x 2 x float> %v) {
ret float %red
}
-declare float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float>)
-
define float @vreduce_fmin_nxv4f32(<vscale x 4 x float> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv4f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> %v)
@@ -605,8 +573,6 @@ define float @vreduce_fmin_nxv4f32(<vscale x 4 x float> %v) {
ret float %red
}
-declare float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float>)
-
define float @vreduce_fmin_nxv32f32(<vscale x 32 x float> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv32f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %red = call float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float> %v)
@@ -620,8 +586,6 @@ define float @vreduce_fmin_nxv32f32(<vscale x 32 x float> %v) {
ret float %red
}
-declare double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double>)
-
define double @vreduce_fmin_nxv1f64(<vscale x 1 x double> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv1f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv1f64(<vscale x 1 x double> %v)
@@ -661,8 +625,6 @@ define double @vreduce_fmin_nxv1f64_nonans_noinfs(<vscale x 1 x double> %v) {
ret double %red
}
-declare double @llvm.vector.reduce.fmin.nxv2f64(<vscale x 2 x double>)
-
define double @vreduce_fmin_nxv2f64(<vscale x 2 x double> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv2f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv2f64(<vscale x 2 x double> %v)
@@ -676,8 +638,6 @@ define double @vreduce_fmin_nxv2f64(<vscale x 2 x double> %v) {
ret double %red
}
-declare double @llvm.vector.reduce.fmin.nxv4f64(<vscale x 4 x double>)
-
define double @vreduce_fmin_nxv4f64(<vscale x 4 x double> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv4f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv4f64(<vscale x 4 x double> %v)
@@ -691,8 +651,6 @@ define double @vreduce_fmin_nxv4f64(<vscale x 4 x double> %v) {
ret double %red
}
-declare double @llvm.vector.reduce.fmin.nxv16f64(<vscale x 16 x double>)
-
define double @vreduce_fmin_nxv16f64(<vscale x 16 x double> %v) {
; CHECK-LABEL: 'vreduce_fmin_nxv16f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %red = call double @llvm.vector.reduce.fmin.nxv16f64(<vscale x 16 x double> %v)
@@ -706,8 +664,6 @@ define double @vreduce_fmin_nxv16f64(<vscale x 16 x double> %v) {
ret double %red
}
-declare half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half>)
-
define half @vreduce_fmax_nxv1f16(<vscale x 1 x half> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv1f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv1f16(<vscale x 1 x half> %v)
@@ -747,8 +703,6 @@ define half @vreduce_fmax_nxv1f16_nonans_noinfs(<vscale x 1 x half> %v) #1 {
ret half %red
}
-declare half @llvm.vector.reduce.fmax.nxv2f16(<vscale x 2 x half>)
-
define half @vreduce_fmax_nxv2f16(<vscale x 2 x half> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv2f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv2f16(<vscale x 2 x half> %v)
@@ -762,8 +716,6 @@ define half @vreduce_fmax_nxv2f16(<vscale x 2 x half> %v) {
ret half %red
}
-declare half @llvm.vector.reduce.fmax.nxv4f16(<vscale x 4 x half>)
-
define half @vreduce_fmax_nxv4f16(<vscale x 4 x half> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv4f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv4f16(<vscale x 4 x half> %v)
@@ -777,8 +729,6 @@ define half @vreduce_fmax_nxv4f16(<vscale x 4 x half> %v) {
ret half %red
}
-declare half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half>)
-
define half @vreduce_fmax_nxv64f16(<vscale x 64 x half> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv64f16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %red = call half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half> %v)
@@ -792,8 +742,6 @@ define half @vreduce_fmax_nxv64f16(<vscale x 64 x half> %v) {
ret half %red
}
-declare float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float>)
-
define float @vreduce_fmax_nxv1f32(<vscale x 1 x float> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv1f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv1f32(<vscale x 1 x float> %v)
@@ -833,8 +781,6 @@ define float @vreduce_fmax_nxv1f32_nonans_noinfs(<vscale x 1 x float> %v) {
ret float %red
}
-declare float @llvm.vector.reduce.fmax.nxv2f32(<vscale x 2 x float>)
-
define float @vreduce_fmax_nxv2f32(<vscale x 2 x float> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv2f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv2f32(<vscale x 2 x float> %v)
@@ -848,8 +794,6 @@ define float @vreduce_fmax_nxv2f32(<vscale x 2 x float> %v) {
ret float %red
}
-declare float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float>)
-
define float @vreduce_fmax_nxv4f32(<vscale x 4 x float> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv4f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> %v)
@@ -863,8 +807,6 @@ define float @vreduce_fmax_nxv4f32(<vscale x 4 x float> %v) {
ret float %red
}
-declare float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float>)
-
define float @vreduce_fmax_nxv32f32(<vscale x 32 x float> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv32f32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %red = call float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float> %v)
@@ -878,8 +820,6 @@ define float @vreduce_fmax_nxv32f32(<vscale x 32 x float> %v) {
ret float %red
}
-declare double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double>)
-
define double @vreduce_fmax_nxv1f64(<vscale x 1 x double> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv1f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv1f64(<vscale x 1 x double> %v)
@@ -919,8 +859,6 @@ define double @vreduce_fmax_nxv1f64_nonans_noinfs(<vscale x 1 x double> %v) {
ret double %red
}
-declare double @llvm.vector.reduce.fmax.nxv2f64(<vscale x 2 x double>)
-
define double @vreduce_fmax_nxv2f64(<vscale x 2 x double> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv2f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv2f64(<vscale x 2 x double> %v)
@@ -934,8 +872,6 @@ define double @vreduce_fmax_nxv2f64(<vscale x 2 x double> %v) {
ret double %red
}
-declare double @llvm.vector.reduce.fmax.nxv4f64(<vscale x 4 x double>)
-
define double @vreduce_fmax_nxv4f64(<vscale x 4 x double> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv4f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv4f64(<vscale x 4 x double> %v)
@@ -949,8 +885,6 @@ define double @vreduce_fmax_nxv4f64(<vscale x 4 x double> %v) {
ret double %red
}
-declare double @llvm.vector.reduce.fmax.nxv16f64(<vscale x 16 x double>)
-
define double @vreduce_fmax_nxv16f64(<vscale x 16 x double> %v) {
; CHECK-LABEL: 'vreduce_fmax_nxv16f64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %red = call double @llvm.vector.reduce.fmax.nxv16f64(<vscale x 16 x double> %v)
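The fadd reductions in this file differ from the integer ones in taking a scalar start value as the first operand, and the tests mark them reassoc so an unordered reduction is permitted. A minimal sketch on a scalable type, assuming a recent opt:

define float @reduce_fadd_example(<vscale x 2 x float> %v, float %s) {
  ; Without 'reassoc' this would be an ordered (sequential) reduction.
  %red = call reassoc float @llvm.vector.reduce.fadd.nxv2f32(float %s, <vscale x 2 x float> %v)
  ret float %red
}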
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
index a1a22b56358a3..131e89f2fc2a3 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
@@ -4,8 +4,6 @@
; RUN: opt < %s -mtriple=riscv32 -mattr=+v -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output | FileCheck %s --check-prefix=SIZE
; RUN: opt < %s -mtriple=riscv64 -mattr=+v -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output | FileCheck %s --check-prefix=SIZE
-declare i8 @llvm.vector.reduce.add.nxv1i8(<vscale x 1 x i8>)
-
define signext i8 @vreduce_add_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-LABEL: 'vreduce_add_nxv1i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.add.nxv1i8(<vscale x 1 x i8> %v)
@@ -19,8 +17,6 @@ define signext i8 @vreduce_add_nxv1i8(<vscale x 1 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.umax.nxv1i8(<vscale x 1 x i8>)
-
define signext i8 @vreduce_umax_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv1i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.umax.nxv1i8(<vscale x 1 x i8> %v)
@@ -34,8 +30,6 @@ define signext i8 @vreduce_umax_nxv1i8(<vscale x 1 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.smax.nxv1i8(<vscale x 1 x i8>)
-
define signext i8 @vreduce_smax_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv1i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.smax.nxv1i8(<vscale x 1 x i8> %v)
@@ -49,8 +43,6 @@ define signext i8 @vreduce_smax_nxv1i8(<vscale x 1 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.umin.nxv1i8(<vscale x 1 x i8>)
-
define signext i8 @vreduce_umin_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv1i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.umin.nxv1i8(<vscale x 1 x i8> %v)
@@ -64,8 +56,6 @@ define signext i8 @vreduce_umin_nxv1i8(<vscale x 1 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.smin.nxv1i8(<vscale x 1 x i8>)
-
define signext i8 @vreduce_smin_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv1i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.smin.nxv1i8(<vscale x 1 x i8> %v)
@@ -79,8 +69,6 @@ define signext i8 @vreduce_smin_nxv1i8(<vscale x 1 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.and.nxv1i8(<vscale x 1 x i8>)
-
define signext i8 @vreduce_and_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-LABEL: 'vreduce_and_nxv1i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.and.nxv1i8(<vscale x 1 x i8> %v)
@@ -94,8 +82,6 @@ define signext i8 @vreduce_and_nxv1i8(<vscale x 1 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.or.nxv1i8(<vscale x 1 x i8>)
-
define signext i8 @vreduce_or_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-LABEL: 'vreduce_or_nxv1i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i8 @llvm.vector.reduce.or.nxv1i8(<vscale x 1 x i8> %v)
@@ -109,8 +95,6 @@ define signext i8 @vreduce_or_nxv1i8(<vscale x 1 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.xor.nxv1i8(<vscale x 1 x i8>)
-
define signext i8 @vreduce_xor_nxv1i8(<vscale x 1 x i8> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv1i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.xor.nxv1i8(<vscale x 1 x i8> %v)
@@ -124,8 +108,6 @@ define signext i8 @vreduce_xor_nxv1i8(<vscale x 1 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8>)
-
define signext i8 @vreduce_add_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-LABEL: 'vreduce_add_nxv2i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8> %v)
@@ -139,8 +121,6 @@ define signext i8 @vreduce_add_nxv2i8(<vscale x 2 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.umax.nxv2i8(<vscale x 2 x i8>)
-
define signext i8 @vreduce_umax_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv2i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.umax.nxv2i8(<vscale x 2 x i8> %v)
@@ -154,8 +134,6 @@ define signext i8 @vreduce_umax_nxv2i8(<vscale x 2 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.smax.nxv2i8(<vscale x 2 x i8>)
-
define signext i8 @vreduce_smax_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv2i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.smax.nxv2i8(<vscale x 2 x i8> %v)
@@ -169,8 +147,6 @@ define signext i8 @vreduce_smax_nxv2i8(<vscale x 2 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.umin.nxv2i8(<vscale x 2 x i8>)
-
define signext i8 @vreduce_umin_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv2i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.umin.nxv2i8(<vscale x 2 x i8> %v)
@@ -184,8 +160,6 @@ define signext i8 @vreduce_umin_nxv2i8(<vscale x 2 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.smin.nxv2i8(<vscale x 2 x i8>)
-
define signext i8 @vreduce_smin_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv2i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.smin.nxv2i8(<vscale x 2 x i8> %v)
@@ -199,8 +173,6 @@ define signext i8 @vreduce_smin_nxv2i8(<vscale x 2 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.and.nxv2i8(<vscale x 2 x i8>)
-
define signext i8 @vreduce_and_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-LABEL: 'vreduce_and_nxv2i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.and.nxv2i8(<vscale x 2 x i8> %v)
@@ -214,8 +186,6 @@ define signext i8 @vreduce_and_nxv2i8(<vscale x 2 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.or.nxv2i8(<vscale x 2 x i8>)
-
define signext i8 @vreduce_or_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-LABEL: 'vreduce_or_nxv2i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i8 @llvm.vector.reduce.or.nxv2i8(<vscale x 2 x i8> %v)
@@ -229,8 +199,6 @@ define signext i8 @vreduce_or_nxv2i8(<vscale x 2 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.xor.nxv2i8(<vscale x 2 x i8>)
-
define signext i8 @vreduce_xor_nxv2i8(<vscale x 2 x i8> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv2i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i8 @llvm.vector.reduce.xor.nxv2i8(<vscale x 2 x i8> %v)
@@ -244,8 +212,6 @@ define signext i8 @vreduce_xor_nxv2i8(<vscale x 2 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8>)
-
define signext i8 @vreduce_add_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-LABEL: 'vreduce_add_nxv4i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8> %v)
@@ -259,8 +225,6 @@ define signext i8 @vreduce_add_nxv4i8(<vscale x 4 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.umax.nxv4i8(<vscale x 4 x i8>)
-
define signext i8 @vreduce_umax_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv4i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i8 @llvm.vector.reduce.umax.nxv4i8(<vscale x 4 x i8> %v)
@@ -274,8 +238,6 @@ define signext i8 @vreduce_umax_nxv4i8(<vscale x 4 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.smax.nxv4i8(<vscale x 4 x i8>)
-
define signext i8 @vreduce_smax_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv4i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i8 @llvm.vector.reduce.smax.nxv4i8(<vscale x 4 x i8> %v)
@@ -289,8 +251,6 @@ define signext i8 @vreduce_smax_nxv4i8(<vscale x 4 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.umin.nxv4i8(<vscale x 4 x i8>)
-
define signext i8 @vreduce_umin_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv4i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i8 @llvm.vector.reduce.umin.nxv4i8(<vscale x 4 x i8> %v)
@@ -304,8 +264,6 @@ define signext i8 @vreduce_umin_nxv4i8(<vscale x 4 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.smin.nxv4i8(<vscale x 4 x i8>)
-
define signext i8 @vreduce_smin_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv4i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i8 @llvm.vector.reduce.smin.nxv4i8(<vscale x 4 x i8> %v)
@@ -319,8 +277,6 @@ define signext i8 @vreduce_smin_nxv4i8(<vscale x 4 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.and.nxv4i8(<vscale x 4 x i8>)
-
define signext i8 @vreduce_and_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-LABEL: 'vreduce_and_nxv4i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i8 @llvm.vector.reduce.and.nxv4i8(<vscale x 4 x i8> %v)
@@ -334,8 +290,6 @@ define signext i8 @vreduce_and_nxv4i8(<vscale x 4 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.or.nxv4i8(<vscale x 4 x i8>)
-
define signext i8 @vreduce_or_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-LABEL: 'vreduce_or_nxv4i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i8 @llvm.vector.reduce.or.nxv4i8(<vscale x 4 x i8> %v)
@@ -349,8 +303,6 @@ define signext i8 @vreduce_or_nxv4i8(<vscale x 4 x i8> %v) {
ret i8 %red
}
-declare i8 @llvm.vector.reduce.xor.nxv4i8(<vscale x 4 x i8>)
-
define signext i8 @vreduce_xor_nxv4i8(<vscale x 4 x i8> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv4i8'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i8 @llvm.vector.reduce.xor.nxv4i8(<vscale x 4 x i8> %v)
@@ -364,8 +316,6 @@ define signext i8 @vreduce_xor_nxv4i8(<vscale x 4 x i8> %v) {
ret i8 %red
}
-declare i16 @llvm.vector.reduce.add.nxv1i16(<vscale x 1 x i16>)
-
define signext i16 @vreduce_add_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: 'vreduce_add_nxv1i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.add.nxv1i16(<vscale x 1 x i16> %v)
@@ -411,8 +361,6 @@ define signext i16 @vwreduce_uadd_nxv1i8(<vscale x 1 x i8> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.umax.nxv1i16(<vscale x 1 x i16>)
-
define signext i16 @vreduce_umax_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv1i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.umax.nxv1i16(<vscale x 1 x i16> %v)
@@ -426,8 +374,6 @@ define signext i16 @vreduce_umax_nxv1i16(<vscale x 1 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.smax.nxv1i16(<vscale x 1 x i16>)
-
define signext i16 @vreduce_smax_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv1i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.smax.nxv1i16(<vscale x 1 x i16> %v)
@@ -441,8 +387,6 @@ define signext i16 @vreduce_smax_nxv1i16(<vscale x 1 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.umin.nxv1i16(<vscale x 1 x i16>)
-
define signext i16 @vreduce_umin_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv1i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.umin.nxv1i16(<vscale x 1 x i16> %v)
@@ -456,8 +400,6 @@ define signext i16 @vreduce_umin_nxv1i16(<vscale x 1 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.smin.nxv1i16(<vscale x 1 x i16>)
-
define signext i16 @vreduce_smin_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv1i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.smin.nxv1i16(<vscale x 1 x i16> %v)
@@ -471,8 +413,6 @@ define signext i16 @vreduce_smin_nxv1i16(<vscale x 1 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.and.nxv1i16(<vscale x 1 x i16>)
-
define signext i16 @vreduce_and_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: 'vreduce_and_nxv1i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.and.nxv1i16(<vscale x 1 x i16> %v)
@@ -486,8 +426,6 @@ define signext i16 @vreduce_and_nxv1i16(<vscale x 1 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.or.nxv1i16(<vscale x 1 x i16>)
-
define signext i16 @vreduce_or_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: 'vreduce_or_nxv1i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i16 @llvm.vector.reduce.or.nxv1i16(<vscale x 1 x i16> %v)
@@ -501,8 +439,6 @@ define signext i16 @vreduce_or_nxv1i16(<vscale x 1 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.xor.nxv1i16(<vscale x 1 x i16>)
-
define signext i16 @vreduce_xor_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv1i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.xor.nxv1i16(<vscale x 1 x i16> %v)
@@ -516,8 +452,6 @@ define signext i16 @vreduce_xor_nxv1i16(<vscale x 1 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.add.nxv2i16(<vscale x 2 x i16>)
-
define signext i16 @vreduce_add_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: 'vreduce_add_nxv2i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i16 @llvm.vector.reduce.add.nxv2i16(<vscale x 2 x i16> %v)
@@ -563,8 +497,6 @@ define signext i16 @vwreduce_uadd_nxv2i8(<vscale x 2 x i8> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.umax.nxv2i16(<vscale x 2 x i16>)
-
define signext i16 @vreduce_umax_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv2i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.umax.nxv2i16(<vscale x 2 x i16> %v)
@@ -578,8 +510,6 @@ define signext i16 @vreduce_umax_nxv2i16(<vscale x 2 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.smax.nxv2i16(<vscale x 2 x i16>)
-
define signext i16 @vreduce_smax_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv2i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.smax.nxv2i16(<vscale x 2 x i16> %v)
@@ -593,8 +523,6 @@ define signext i16 @vreduce_smax_nxv2i16(<vscale x 2 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.umin.nxv2i16(<vscale x 2 x i16>)
-
define signext i16 @vreduce_umin_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv2i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.umin.nxv2i16(<vscale x 2 x i16> %v)
@@ -608,8 +536,6 @@ define signext i16 @vreduce_umin_nxv2i16(<vscale x 2 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.smin.nxv2i16(<vscale x 2 x i16>)
-
define signext i16 @vreduce_smin_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv2i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.smin.nxv2i16(<vscale x 2 x i16> %v)
@@ -623,8 +549,6 @@ define signext i16 @vreduce_smin_nxv2i16(<vscale x 2 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.and.nxv2i16(<vscale x 2 x i16>)
-
define signext i16 @vreduce_and_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: 'vreduce_and_nxv2i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.and.nxv2i16(<vscale x 2 x i16> %v)
@@ -638,8 +562,6 @@ define signext i16 @vreduce_and_nxv2i16(<vscale x 2 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.or.nxv2i16(<vscale x 2 x i16>)
-
define signext i16 @vreduce_or_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: 'vreduce_or_nxv2i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i16 @llvm.vector.reduce.or.nxv2i16(<vscale x 2 x i16> %v)
@@ -653,8 +575,6 @@ define signext i16 @vreduce_or_nxv2i16(<vscale x 2 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.xor.nxv2i16(<vscale x 2 x i16>)
-
define signext i16 @vreduce_xor_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv2i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i16 @llvm.vector.reduce.xor.nxv2i16(<vscale x 2 x i16> %v)
@@ -668,8 +588,6 @@ define signext i16 @vreduce_xor_nxv2i16(<vscale x 2 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.add.nxv4i16(<vscale x 4 x i16>)
-
define signext i16 @vreduce_add_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vreduce_add_nxv4i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i16 @llvm.vector.reduce.add.nxv4i16(<vscale x 4 x i16> %v)
@@ -715,8 +633,6 @@ define signext i16 @vwreduce_uadd_nxv4i8(<vscale x 4 x i8> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.umax.nxv4i16(<vscale x 4 x i16>)
-
define signext i16 @vreduce_umax_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv4i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i16 @llvm.vector.reduce.umax.nxv4i16(<vscale x 4 x i16> %v)
@@ -730,8 +646,6 @@ define signext i16 @vreduce_umax_nxv4i16(<vscale x 4 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.smax.nxv4i16(<vscale x 4 x i16>)
-
define signext i16 @vreduce_smax_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv4i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i16 @llvm.vector.reduce.smax.nxv4i16(<vscale x 4 x i16> %v)
@@ -745,8 +659,6 @@ define signext i16 @vreduce_smax_nxv4i16(<vscale x 4 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.umin.nxv4i16(<vscale x 4 x i16>)
-
define signext i16 @vreduce_umin_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv4i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i16 @llvm.vector.reduce.umin.nxv4i16(<vscale x 4 x i16> %v)
@@ -760,8 +672,6 @@ define signext i16 @vreduce_umin_nxv4i16(<vscale x 4 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.smin.nxv4i16(<vscale x 4 x i16>)
-
define signext i16 @vreduce_smin_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv4i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i16 @llvm.vector.reduce.smin.nxv4i16(<vscale x 4 x i16> %v)
@@ -775,8 +685,6 @@ define signext i16 @vreduce_smin_nxv4i16(<vscale x 4 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.and.nxv4i16(<vscale x 4 x i16>)
-
define signext i16 @vreduce_and_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vreduce_and_nxv4i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i16 @llvm.vector.reduce.and.nxv4i16(<vscale x 4 x i16> %v)
@@ -790,8 +698,6 @@ define signext i16 @vreduce_and_nxv4i16(<vscale x 4 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.or.nxv4i16(<vscale x 4 x i16>)
-
define signext i16 @vreduce_or_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vreduce_or_nxv4i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i16 @llvm.vector.reduce.or.nxv4i16(<vscale x 4 x i16> %v)
@@ -805,8 +711,6 @@ define signext i16 @vreduce_or_nxv4i16(<vscale x 4 x i16> %v) {
ret i16 %red
}
-declare i16 @llvm.vector.reduce.xor.nxv4i16(<vscale x 4 x i16>)
-
define signext i16 @vreduce_xor_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv4i16'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i16 @llvm.vector.reduce.xor.nxv4i16(<vscale x 4 x i16> %v)
@@ -820,8 +724,6 @@ define signext i16 @vreduce_xor_nxv4i16(<vscale x 4 x i16> %v) {
ret i16 %red
}
-declare i32 @llvm.vector.reduce.add.nxv1i32(<vscale x 1 x i32>)
-
define signext i32 @vreduce_add_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: 'vreduce_add_nxv1i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.add.nxv1i32(<vscale x 1 x i32> %v)
@@ -867,8 +769,6 @@ define signext i32 @vwreduce_uadd_nxv1i16(<vscale x 1 x i16> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32>)
-
define signext i32 @vreduce_umax_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv1i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32> %v)
@@ -882,8 +782,6 @@ define signext i32 @vreduce_umax_nxv1i32(<vscale x 1 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.smax.nxv1i32(<vscale x 1 x i32>)
-
define signext i32 @vreduce_smax_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv1i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.smax.nxv1i32(<vscale x 1 x i32> %v)
@@ -897,8 +795,6 @@ define signext i32 @vreduce_smax_nxv1i32(<vscale x 1 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.umin.nxv1i32(<vscale x 1 x i32>)
-
define signext i32 @vreduce_umin_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv1i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.umin.nxv1i32(<vscale x 1 x i32> %v)
@@ -912,8 +808,6 @@ define signext i32 @vreduce_umin_nxv1i32(<vscale x 1 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.smin.nxv1i32(<vscale x 1 x i32>)
-
define signext i32 @vreduce_smin_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv1i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.smin.nxv1i32(<vscale x 1 x i32> %v)
@@ -927,8 +821,6 @@ define signext i32 @vreduce_smin_nxv1i32(<vscale x 1 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.and.nxv1i32(<vscale x 1 x i32>)
-
define signext i32 @vreduce_and_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: 'vreduce_and_nxv1i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.and.nxv1i32(<vscale x 1 x i32> %v)
@@ -942,8 +834,6 @@ define signext i32 @vreduce_and_nxv1i32(<vscale x 1 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.or.nxv1i32(<vscale x 1 x i32>)
-
define signext i32 @vreduce_or_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: 'vreduce_or_nxv1i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i32 @llvm.vector.reduce.or.nxv1i32(<vscale x 1 x i32> %v)
@@ -957,8 +847,6 @@ define signext i32 @vreduce_or_nxv1i32(<vscale x 1 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.xor.nxv1i32(<vscale x 1 x i32>)
-
define signext i32 @vreduce_xor_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv1i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.xor.nxv1i32(<vscale x 1 x i32> %v)
@@ -972,8 +860,6 @@ define signext i32 @vreduce_xor_nxv1i32(<vscale x 1 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32>)
-
define signext i32 @vreduce_add_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vreduce_add_nxv2i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> %v)
@@ -1019,8 +905,6 @@ define signext i32 @vwreduce_uadd_nxv2i16(<vscale x 2 x i16> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.umax.nxv2i32(<vscale x 2 x i32>)
-
define signext i32 @vreduce_umax_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv2i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.umax.nxv2i32(<vscale x 2 x i32> %v)
@@ -1034,8 +918,6 @@ define signext i32 @vreduce_umax_nxv2i32(<vscale x 2 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.smax.nxv2i32(<vscale x 2 x i32>)
-
define signext i32 @vreduce_smax_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv2i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.smax.nxv2i32(<vscale x 2 x i32> %v)
@@ -1049,8 +931,6 @@ define signext i32 @vreduce_smax_nxv2i32(<vscale x 2 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.umin.nxv2i32(<vscale x 2 x i32>)
-
define signext i32 @vreduce_umin_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv2i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.umin.nxv2i32(<vscale x 2 x i32> %v)
@@ -1064,8 +944,6 @@ define signext i32 @vreduce_umin_nxv2i32(<vscale x 2 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.smin.nxv2i32(<vscale x 2 x i32>)
-
define signext i32 @vreduce_smin_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv2i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.smin.nxv2i32(<vscale x 2 x i32> %v)
@@ -1079,8 +957,6 @@ define signext i32 @vreduce_smin_nxv2i32(<vscale x 2 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.and.nxv2i32(<vscale x 2 x i32>)
-
define signext i32 @vreduce_and_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vreduce_and_nxv2i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.and.nxv2i32(<vscale x 2 x i32> %v)
@@ -1094,8 +970,6 @@ define signext i32 @vreduce_and_nxv2i32(<vscale x 2 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32>)
-
define signext i32 @vreduce_or_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vreduce_or_nxv2i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> %v)
@@ -1109,8 +983,6 @@ define signext i32 @vreduce_or_nxv2i32(<vscale x 2 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.xor.nxv2i32(<vscale x 2 x i32>)
-
define signext i32 @vreduce_xor_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv2i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i32 @llvm.vector.reduce.xor.nxv2i32(<vscale x 2 x i32> %v)
@@ -1124,8 +996,6 @@ define signext i32 @vreduce_xor_nxv2i32(<vscale x 2 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32>)
-
define signext i32 @vreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vreduce_add_nxv4i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %v)
@@ -1171,8 +1041,6 @@ define signext i32 @vwreduce_uadd_nxv4i16(<vscale x 4 x i16> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32>)
-
define signext i32 @vreduce_umax_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv4i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> %v)
@@ -1186,8 +1054,6 @@ define signext i32 @vreduce_umax_nxv4i32(<vscale x 4 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32>)
-
define signext i32 @vreduce_smax_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv4i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> %v)
@@ -1201,8 +1067,6 @@ define signext i32 @vreduce_smax_nxv4i32(<vscale x 4 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32>)
-
define signext i32 @vreduce_umin_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv4i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> %v)
@@ -1216,8 +1080,6 @@ define signext i32 @vreduce_umin_nxv4i32(<vscale x 4 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32>)
-
define signext i32 @vreduce_smin_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv4i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> %v)
@@ -1231,8 +1093,6 @@ define signext i32 @vreduce_smin_nxv4i32(<vscale x 4 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32>)
-
define signext i32 @vreduce_and_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vreduce_and_nxv4i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> %v)
@@ -1246,8 +1106,6 @@ define signext i32 @vreduce_and_nxv4i32(<vscale x 4 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32>)
-
define signext i32 @vreduce_or_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vreduce_or_nxv4i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> %v)
@@ -1261,8 +1119,6 @@ define signext i32 @vreduce_or_nxv4i32(<vscale x 4 x i32> %v) {
ret i32 %red
}
-declare i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32>)
-
define signext i32 @vreduce_xor_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv4i32'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> %v)
@@ -1276,8 +1132,6 @@ define signext i32 @vreduce_xor_nxv4i32(<vscale x 4 x i32> %v) {
ret i32 %red
}
-declare i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64>)
-
define i64 @vreduce_add_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: 'vreduce_add_nxv1i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64> %v)
@@ -1323,8 +1177,6 @@ define i64 @vwreduce_uadd_nxv1i32(<vscale x 1 x i32> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64>)
-
define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv1i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64> %v)
@@ -1338,8 +1190,6 @@ define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64>)
-
define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv1i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64> %v)
@@ -1353,8 +1203,6 @@ define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64>)
-
define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv1i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64> %v)
@@ -1368,8 +1216,6 @@ define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64>)
-
define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv1i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64> %v)
@@ -1383,8 +1229,6 @@ define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.and.nxv1i64(<vscale x 1 x i64>)
-
define i64 @vreduce_and_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: 'vreduce_and_nxv1i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.and.nxv1i64(<vscale x 1 x i64> %v)
@@ -1398,8 +1242,6 @@ define i64 @vreduce_and_nxv1i64(<vscale x 1 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.or.nxv1i64(<vscale x 1 x i64>)
-
define i64 @vreduce_or_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: 'vreduce_or_nxv1i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %red = call i64 @llvm.vector.reduce.or.nxv1i64(<vscale x 1 x i64> %v)
@@ -1413,8 +1255,6 @@ define i64 @vreduce_or_nxv1i64(<vscale x 1 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.xor.nxv1i64(<vscale x 1 x i64>)
-
define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv1i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.xor.nxv1i64(<vscale x 1 x i64> %v)
@@ -1428,8 +1268,6 @@ define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>)
-
define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: 'vreduce_add_nxv2i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %v)
@@ -1475,8 +1313,6 @@ define i64 @vwreduce_uadd_nxv2i32(<vscale x 2 x i32> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64>)
-
define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv2i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64> %v)
@@ -1490,8 +1326,6 @@ define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64>)
-
define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv2i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64> %v)
@@ -1505,8 +1339,6 @@ define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64>)
-
define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv2i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64> %v)
@@ -1520,8 +1352,6 @@ define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64>)
-
define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv2i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64> %v)
@@ -1535,8 +1365,6 @@ define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64>)
-
define i64 @vreduce_and_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: 'vreduce_and_nxv2i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> %v)
@@ -1550,8 +1378,6 @@ define i64 @vreduce_and_nxv2i64(<vscale x 2 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64>)
-
define i64 @vreduce_or_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: 'vreduce_or_nxv2i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %red = call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> %v)
@@ -1565,8 +1391,6 @@ define i64 @vreduce_or_nxv2i64(<vscale x 2 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64>)
-
define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv2i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64> %v)
@@ -1580,8 +1404,6 @@ define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64>)
-
define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: 'vreduce_add_nxv4i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %v)
@@ -1627,8 +1449,6 @@ define i64 @vwreduce_uadd_nxv4i32(<vscale x 4 x i32> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64>)
-
define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: 'vreduce_umax_nxv4i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64> %v)
@@ -1642,8 +1462,6 @@ define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64>)
-
define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: 'vreduce_smax_nxv4i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64> %v)
@@ -1657,8 +1475,6 @@ define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64>)
-
define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: 'vreduce_umin_nxv4i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64> %v)
@@ -1672,8 +1488,6 @@ define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64>)
-
define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: 'vreduce_smin_nxv4i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64> %v)
@@ -1687,8 +1501,6 @@ define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.and.nxv4i64(<vscale x 4 x i64>)
-
define i64 @vreduce_and_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: 'vreduce_and_nxv4i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.and.nxv4i64(<vscale x 4 x i64> %v)
@@ -1702,8 +1514,6 @@ define i64 @vreduce_and_nxv4i64(<vscale x 4 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.or.nxv4i64(<vscale x 4 x i64>)
-
define i64 @vreduce_or_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: 'vreduce_or_nxv4i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.or.nxv4i64(<vscale x 4 x i64> %v)
@@ -1717,8 +1527,6 @@ define i64 @vreduce_or_nxv4i64(<vscale x 4 x i64> %v) {
ret i64 %red
}
-declare i64 @llvm.vector.reduce.xor.nxv4i64(<vscale x 4 x i64>)
-
define i64 @vreduce_xor_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: 'vreduce_xor_nxv4i64'
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i64 @llvm.vector.reduce.xor.nxv4i64(<vscale x 4 x i64> %v)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-xor.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-xor.ll
index aa03b02895d5f..abc5335afbc98 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-xor.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-xor.ll
@@ -173,44 +173,3 @@ define i32 @reduce_i64(i32 %arg) {
%V128 = call i64 @llvm.vector.reduce.xor.v128i64(<128 x i64> undef)
ret i32 undef
}
-
-declare i1 @llvm.vector.reduce.xor.v1i1(<1 x i1>)
-declare i1 @llvm.vector.reduce.xor.v2i1(<2 x i1>)
-declare i1 @llvm.vector.reduce.xor.v4i1(<4 x i1>)
-declare i1 @llvm.vector.reduce.xor.v8i1(<8 x i1>)
-declare i1 @llvm.vector.reduce.xor.v16i1(<16 x i1>)
-declare i1 @llvm.vector.reduce.xor.v32i1(<32 x i1>)
-declare i1 @llvm.vector.reduce.xor.v64i1(<64 x i1>)
-declare i1 @llvm.vector.reduce.xor.v128i1(<128 x i1>)
-declare i8 @llvm.vector.reduce.xor.v1i8(<1 x i8>)
-declare i8 @llvm.vector.reduce.xor.v2i8(<2 x i8>)
-declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>)
-declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>)
-declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
-declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>)
-declare i8 @llvm.vector.reduce.xor.v64i8(<64 x i8>)
-declare i8 @llvm.vector.reduce.xor.v128i8(<128 x i8>)
-declare i16 @llvm.vector.reduce.xor.v1i16(<1 x i16>)
-declare i16 @llvm.vector.reduce.xor.v2i16(<2 x i16>)
-declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>)
-declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>)
-declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>)
-declare i16 @llvm.vector.reduce.xor.v32i16(<32 x i16>)
-declare i16 @llvm.vector.reduce.xor.v64i16(<64 x i16>)
-declare i16 @llvm.vector.reduce.xor.v128i16(<128 x i16>)
-declare i32 @llvm.vector.reduce.xor.v1i32(<1 x i32>)
-declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>)
-declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>)
-declare i32 @llvm.vector.reduce.xor.v16i32(<16 x i32>)
-declare i32 @llvm.vector.reduce.xor.v32i32(<32 x i32>)
-declare i32 @llvm.vector.reduce.xor.v64i32(<64 x i32>)
-declare i32 @llvm.vector.reduce.xor.v128i32(<128 x i32>)
-declare i64 @llvm.vector.reduce.xor.v1i64(<1 x i64>)
-declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>)
-declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>)
-declare i64 @llvm.vector.reduce.xor.v8i64(<8 x i64>)
-declare i64 @llvm.vector.reduce.xor.v16i64(<16 x i64>)
-declare i64 @llvm.vector.reduce.xor.v32i64(<32 x i64>)
-declare i64 @llvm.vector.reduce.xor.v64i64(<64 x i64>)
-declare i64 @llvm.vector.reduce.xor.v128i64(<128 x i64>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-expandload-compressstore.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-expandload-compressstore.ll
index e0f80f541bc19..94e2390a12fc1 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-expandload-compressstore.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-expandload-compressstore.ll
@@ -142,36 +142,3 @@ define void @compress_store() {
ret void
}
-declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
-declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.expandload.v16i8(ptr, <16 x i1>, <16 x i8>)
-declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)
-declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
-declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
-declare <16 x i64> @llvm.masked.expandload.v16i64(ptr, <16 x i1>, <16 x i64>)
-declare <vscale x 2 x i8> @llvm.masked.expandload.nxv2i8(ptr, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 4 x i8> @llvm.masked.expandload.nxv4i8(ptr, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 8 x i8> @llvm.masked.expandload.nxv8i8(ptr, <vscale x 8 x i1>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.masked.expandload.nxv16i8(ptr, <vscale x 16 x i1>, <vscale x 16 x i8>)
-declare <vscale x 2 x i64> @llvm.masked.expandload.nxv2i64(ptr, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.masked.expandload.nxv4i64(ptr, <vscale x 4 x i1>, <vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.masked.expandload.nxv8i64(ptr, <vscale x 8 x i1>, <vscale x 8 x i64>)
-declare <vscale x 16 x i64> @llvm.masked.expandload.nxv16i64(ptr, <vscale x 16 x i1>, <vscale x 16 x i64>)
-
-declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
-declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
-declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
-declare void @llvm.masked.compressstore.v16i8(<16 x i8>, ptr, <16 x i1>)
-declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
-declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
-declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
-declare void @llvm.masked.compressstore.v16i64(<16 x i64>, ptr, <16 x i1>)
-declare void @llvm.masked.compressstore.nxv2i8(<vscale x 2 x i8>, ptr, <vscale x 2 x i1>)
-declare void @llvm.masked.compressstore.nxv4i8(<vscale x 4 x i8>, ptr, <vscale x 4 x i1>)
-declare void @llvm.masked.compressstore.nxv8i8(<vscale x 8 x i8>, ptr, <vscale x 8 x i1>)
-declare void @llvm.masked.compressstore.nxv16i8(<vscale x 16 x i8>, ptr, <vscale x 16 x i1>)
-declare void @llvm.masked.compressstore.nxv2i64(<vscale x 2 x i64>, ptr, <vscale x 2 x i1>)
-declare void @llvm.masked.compressstore.nxv4i64(<vscale x 4 x i64>, ptr, <vscale x 4 x i1>)
-declare void @llvm.masked.compressstore.nxv8i64(<vscale x 8 x i64>, ptr, <vscale x 8 x i1>)
-declare void @llvm.masked.compressstore.nxv16i64(<vscale x 16 x i64>, ptr, <vscale x 16 x i1>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
index 437a9af8fcc83..ece528d9a6030 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
@@ -71,10 +71,6 @@ define void @vector_insert_extract(<vscale x 4 x i32> %v0, <vscale x 16 x i32> %
%insert_scalable_into_scalable = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %v0, i64 0)
ret void
}
-declare <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32>, i64)
-declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32>, <16 x i32>, i64)
-declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32>, i64)
-declare <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64)
define void @vector_reverse() {
; CHECK-LABEL: 'vector_reverse'
@@ -143,25 +139,6 @@ define void @vector_reverse() {
ret void
}
-declare <vscale x 16 x i8> @llvm.vector.reverse.nxv16i8(<vscale x 16 x i8>)
-declare <vscale x 32 x i8> @llvm.vector.reverse.nxv32i8(<vscale x 32 x i8>)
-declare <vscale x 2 x i16> @llvm.vector.reverse.nxv2i16(<vscale x 2 x i16>)
-declare <vscale x 4 x i16> @llvm.vector.reverse.nxv4i16(<vscale x 4 x i16>)
-declare <vscale x 8 x i16> @llvm.vector.reverse.nxv8i16(<vscale x 8 x i16>)
-declare <vscale x 16 x i16> @llvm.vector.reverse.nxv16i16(<vscale x 16 x i16>)
-declare <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32>)
-declare <vscale x 8 x i32> @llvm.vector.reverse.nxv8i32(<vscale x 8 x i32>)
-declare <vscale x 2 x i64> @llvm.vector.reverse.nxv2i64(<vscale x 2 x i64>)
-declare <vscale x 4 x i64> @llvm.vector.reverse.nxv4i64(<vscale x 4 x i64>)
-declare <vscale x 8 x i64> @llvm.vector.reverse.nxv8i64(<vscale x 8 x i64>)
-declare <vscale x 16 x i64> @llvm.vector.reverse.nxv16i64(<vscale x 16 x i64>)
-declare <vscale x 32 x i64> @llvm.vector.reverse.nxv32i64(<vscale x 32 x i64>)
-declare <vscale x 16 x i1> @llvm.vector.reverse.nxv16i1(<vscale x 16 x i1>)
-declare <vscale x 8 x i1> @llvm.vector.reverse.nxv8i1(<vscale x 8 x i1>)
-declare <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1>)
-declare <vscale x 2 x i1> @llvm.vector.reverse.nxv2i1(<vscale x 2 x i1>)
-
-
define void @vector_splice() {
; CHECK-LABEL: 'vector_splice'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %splice_nxv16i8 = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer, i32 1)
@@ -213,19 +190,3 @@ define void @vector_splice() {
%splice_nxv2i1 = call <vscale x 2 x i1> @llvm.vector.splice.nxv2i1(<vscale x 2 x i1> zeroinitializer, <vscale x 2 x i1> zeroinitializer, i32 1)
ret void
}
-
-declare <vscale x 2 x i1> @llvm.vector.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i1> @llvm.vector.splice.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i1> @llvm.vector.splice.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i1> @llvm.vector.splice.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, i32)
-declare <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, i32)
-declare <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, i32)
-declare <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, i32)
-declare <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, i32)
-declare <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, i32)
-declare <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, i32)
-declare <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
-declare <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32)
-declare <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32)
-declare <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, i32)
diff --git a/llvm/test/Analysis/CostModel/RISCV/vp-intrinsics.ll b/llvm/test/Analysis/CostModel/RISCV/vp-intrinsics.ll
index 04b1d18fca8ee..ea3c47dc34201 100644
--- a/llvm/test/Analysis/CostModel/RISCV/vp-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/vp-intrinsics.ll
@@ -1648,275 +1648,3 @@ define void @splice() {
%splice_nxv2i1 = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> zeroinitializer, <vscale x 2 x i1> zeroinitializer, i32 1, <vscale x 2 x i1> zeroinitializer, i32 poison, i32 poison)
ret void
}
-
-declare <2 x i8> @llvm.vp.add.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
-declare <4 x i8> @llvm.vp.add.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
-declare <8 x i8> @llvm.vp.add.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
-declare <16 x i8> @llvm.vp.add.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
-declare <2 x i64> @llvm.vp.add.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
-declare <4 x i64> @llvm.vp.add.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
-declare <8 x i64> @llvm.vp.add.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
-declare <16 x i64> @llvm.vp.add.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i64> @llvm.vp.add.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i1>, i32)
-
-declare <2 x i8> @llvm.vp.abs.v2i8(<2 x i8>, i1, <2 x i1>, i32)
-declare <4 x i8> @llvm.vp.abs.v4i8(<4 x i8>, i1, <4 x i1>, i32)
-declare <8 x i8> @llvm.vp.abs.v8i8(<8 x i8>, i1, <8 x i1>, i32)
-declare <16 x i8> @llvm.vp.abs.v16i8(<16 x i8>, i1, <16 x i1>, i32)
-declare <2 x i64> @llvm.vp.abs.v2i64(<2 x i64>, i1, <2 x i1>, i32)
-declare <4 x i64> @llvm.vp.abs.v4i64(<4 x i64>, i1, <4 x i1>, i32)
-declare <8 x i64> @llvm.vp.abs.v8i64(<8 x i64>, i1, <8 x i1>, i32)
-declare <16 x i64> @llvm.vp.abs.v16i64(<16 x i64>, i1, <16 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8>, i1, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8>, i1, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8>, i1, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8>, i1, <vscale x 16 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64>, i1, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64>, i1, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64>, i1, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64>, i1, <vscale x 16 x i1>, i32)
-
-declare <2 x i8> @llvm.abs.v2i8(<2 x i8>, i1)
-declare <4 x i8> @llvm.abs.v4i8(<4 x i8>, i1)
-declare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1)
-declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
-declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
-declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
-declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
-declare <16 x i64> @llvm.abs.v16i64(<16 x i64>, i1)
-declare <vscale x 2 x i8> @llvm.abs.nxv2i8(<vscale x 2 x i8>, i1)
-declare <vscale x 4 x i8> @llvm.abs.nxv4i8(<vscale x 4 x i8>, i1)
-declare <vscale x 8 x i8> @llvm.abs.nxv8i8(<vscale x 8 x i8>, i1)
-declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
-declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
-declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
-declare <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64>, i1)
-declare <vscale x 16 x i64> @llvm.abs.nxv16i64(<vscale x 16 x i64>, i1)
-
-declare <2 x i8> @llvm.vp.load.v2i8(ptr, <2 x i1>, i32)
-declare <4 x i8> @llvm.vp.load.v4i8(ptr, <4 x i1>, i32)
-declare <8 x i8> @llvm.vp.load.v8i8(ptr, <8 x i1>, i32)
-declare <16 x i8> @llvm.vp.load.v16i8(ptr, <16 x i1>, i32)
-declare <2 x i64> @llvm.vp.load.v2i64(ptr, <2 x i1>, i32)
-declare <4 x i64> @llvm.vp.load.v4i64(ptr, <4 x i1>, i32)
-declare <8 x i64> @llvm.vp.load.v8i64(ptr, <8 x i1>, i32)
-declare <16 x i64> @llvm.vp.load.v16i64(ptr, <16 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.vp.load.nxv2i8(ptr, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i8> @llvm.vp.load.nxv4i8(ptr, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i8> @llvm.vp.load.nxv8i8(ptr, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i8> @llvm.vp.load.nxv16i8(ptr, <vscale x 16 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64(ptr, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.vp.load.nxv4i64(ptr, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64(ptr, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i64> @llvm.vp.load.nxv16i64(ptr, <vscale x 16 x i1>, i32)
-
-declare void @llvm.vp.store.v2i8(<2 x i8>, ptr, <2 x i1>, i32)
-declare void @llvm.vp.store.v4i8(<4 x i8>, ptr, <4 x i1>, i32)
-declare void @llvm.vp.store.v8i8(<8 x i8>, ptr, <8 x i1>, i32)
-declare void @llvm.vp.store.v16i8(<16 x i8>, ptr, <16 x i1>, i32)
-declare void @llvm.vp.store.v2i64(<2 x i64>, ptr, <2 x i1>, i32)
-declare void @llvm.vp.store.v4i64(<4 x i64>, ptr, <4 x i1>, i32)
-declare void @llvm.vp.store.v8i64(<8 x i64>, ptr, <8 x i1>, i32)
-declare void @llvm.vp.store.v16i64(<16 x i64>, ptr, <16 x i1>, i32)
-declare void @llvm.vp.store.nxv2i8(<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)
-declare void @llvm.vp.store.nxv4i8(<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)
-declare void @llvm.vp.store.nxv8i8(<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)
-declare void @llvm.vp.store.nxv16i8(<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32)
-declare void @llvm.vp.store.nxv2i64(<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i32)
-declare void @llvm.vp.store.nxv4i64(<vscale x 4 x i64>, ptr, <vscale x 4 x i1>, i32)
-declare void @llvm.vp.store.nxv8i64(<vscale x 8 x i64>, ptr, <vscale x 8 x i1>, i32)
-declare void @llvm.vp.store.nxv16i64(<vscale x 16 x i64>, ptr, <vscale x 16 x i1>, i32)
-
-declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.i64(ptr, i64, <2 x i1>, i32)
-declare <4 x i8> @llvm.experimental.vp.strided.load.v4i8.i64(ptr, i64, <4 x i1>, i32)
-declare <8 x i8> @llvm.experimental.vp.strided.load.v8i8.i64(ptr, i64, <8 x i1>, i32)
-declare <16 x i8> @llvm.experimental.vp.strided.load.v16i8.i64(ptr, i64, <16 x i1>, i32)
-declare <2 x i64> @llvm.experimental.vp.strided.load.v2i64.i64(ptr, i64, <2 x i1>, i32)
-declare <4 x i64> @llvm.experimental.vp.strided.load.v4i64.i64(ptr, i64, <4 x i1>, i32)
-declare <8 x i64> @llvm.experimental.vp.strided.load.v8i64.i64(ptr, i64, <8 x i1>, i32)
-declare <16 x i64> @llvm.experimental.vp.strided.load.v16i64.i64(ptr, i64, <16 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.i64(ptr, i64, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.i64(ptr, i64, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.i64(ptr, i64, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i8> @llvm.experimental.vp.strided.load.nxv16i8.i64(ptr, i64, <vscale x 16 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.i64(ptr, i64, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.i64(ptr, i64, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.i64(ptr, i64, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i64> @llvm.experimental.vp.strided.load.nxv16i64.i64(ptr, i64, <vscale x 16 x i1>, i32)
-
-declare void @llvm.experimental.vp.strided.store.v2i8.i64(<2 x i8>, ptr, i64, <2 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v4i8.i64(<4 x i8>, ptr, i64, <4 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v8i8.i64(<8 x i8>, ptr, i64, <8 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v16i8.i64(<16 x i8>, ptr, i64, <16 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v2i64.i64(<2 x i64>, ptr, i64, <2 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v4i64.i64(<4 x i64>, ptr, i64, <4 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v8i64.i64(<8 x i64>, ptr, i64, <8 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v16i64.i64(<16 x i64>, ptr, i64, <16 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.nxv2i8.i64(<vscale x 2 x i8>, ptr, i64, <vscale x 2 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.nxv4i8.i64(<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.nxv8i8.i64(<vscale x 8 x i8>, ptr, i64, <vscale x 8 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.nxv16i8.i64(<vscale x 16 x i8>, ptr, i64, <vscale x 16 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.nxv2i64.i64(<vscale x 2 x i64>, ptr, i64, <vscale x 2 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.nxv4i64.i64(<vscale x 4 x i64>, ptr, i64, <vscale x 4 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.nxv8i64.i64(<vscale x 8 x i64>, ptr, i64, <vscale x 8 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.nxv16i64.i64(<vscale x 16 x i64>, ptr, i64, <vscale x 16 x i1>, i32)
-
-declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)
-declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>)
-declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
-declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
-declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
-declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
-declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
-declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
-declare i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8>)
-declare i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8>)
-declare i8 @llvm.vector.reduce.add.nxv8i8(<vscale x 8 x i8>)
-declare i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8>)
-declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>)
-declare i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64>)
-declare i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64>)
-declare i64 @llvm.vector.reduce.add.nxv16i64(<vscale x 16 x i64>)
-
-declare i8 @llvm.vp.reduce.add.v2i8(i8, <2 x i8>, <2 x i1>, i32)
-declare i8 @llvm.vp.reduce.add.v4i8(i8, <4 x i8>, <4 x i1>, i32)
-declare i8 @llvm.vp.reduce.add.v8i8(i8, <8 x i8>, <8 x i1>, i32)
-declare i8 @llvm.vp.reduce.add.v16i8(i8, <16 x i8>, <16 x i1>, i32)
-declare i64 @llvm.vp.reduce.add.v2i64(i64, <2 x i64>, <2 x i1>, i32)
-declare i64 @llvm.vp.reduce.add.v4i64(i64, <4 x i64>, <4 x i1>, i32)
-declare i64 @llvm.vp.reduce.add.v8i64(i64, <8 x i64>, <8 x i1>, i32)
-declare i64 @llvm.vp.reduce.add.v16i64(i64, <16 x i64>, <16 x i1>, i32)
-declare i8 @llvm.vp.reduce.add.nxv2i8(i8, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
-declare i8 @llvm.vp.reduce.add.nxv4i8(i8, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
-declare i8 @llvm.vp.reduce.add.nxv8i8(i8, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
-declare i8 @llvm.vp.reduce.add.nxv16i8(i8, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
-declare i64 @llvm.vp.reduce.add.nxv2i64(i64, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
-declare i64 @llvm.vp.reduce.add.nxv4i64(i64, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
-declare i64 @llvm.vp.reduce.add.nxv8i64(i64, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
-declare i64 @llvm.vp.reduce.add.nxv16i64(i64, <vscale x 16 x i64>, <vscale x 16 x i1>, i32)
-
-declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>)
-declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>)
-declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>)
-declare float @llvm.vector.reduce.fadd.v16f32(float, <16 x float>)
-declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>)
-declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>)
-declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>)
-declare double @llvm.vector.reduce.fadd.v16f64(double, <16 x double>)
-declare float @llvm.vector.reduce.fadd.nxv2f32(float, <vscale x 2 x float>)
-declare float @llvm.vector.reduce.fadd.nxv4f32(float, <vscale x 4 x float>)
-declare float @llvm.vector.reduce.fadd.nxv8f32(float, <vscale x 8 x float>)
-declare float @llvm.vector.reduce.fadd.nxv16f32(float, <vscale x 16 x float>)
-declare double @llvm.vector.reduce.fadd.nxv2f64(double, <vscale x 2 x double>)
-declare double @llvm.vector.reduce.fadd.nxv4f64(double, <vscale x 4 x double>)
-declare double @llvm.vector.reduce.fadd.nxv8f64(double, <vscale x 8 x double>)
-declare double @llvm.vector.reduce.fadd.nxv16f64(double, <vscale x 16 x double>)
-
-declare float @llvm.vp.reduce.fadd.v2f32(float, <2 x float>, <2 x i1>, i32)
-declare float @llvm.vp.reduce.fadd.v4f32(float, <4 x float>, <4 x i1>, i32)
-declare float @llvm.vp.reduce.fadd.v8f32(float, <8 x float>, <8 x i1>, i32)
-declare float @llvm.vp.reduce.fadd.v16f32(float, <16 x float>, <16 x i1>, i32)
-declare double @llvm.vp.reduce.fadd.v2f64(double, <2 x double>, <2 x i1>, i32)
-declare double @llvm.vp.reduce.fadd.v4f64(double, <4 x double>, <4 x i1>, i32)
-declare double @llvm.vp.reduce.fadd.v8f64(double, <8 x double>, <8 x i1>, i32)
-declare double @llvm.vp.reduce.fadd.v16f64(double, <16 x double>, <16 x i1>, i32)
-declare float @llvm.vp.reduce.fadd.nxv2f32(float, <vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare float @llvm.vp.reduce.fadd.nxv4f32(float, <vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare float @llvm.vp.reduce.fadd.nxv8f32(float, <vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare float @llvm.vp.reduce.fadd.nxv16f32(float, <vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare double @llvm.vp.reduce.fadd.nxv2f64(double, <vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare double @llvm.vp.reduce.fadd.nxv4f64(double, <vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare double @llvm.vp.reduce.fadd.nxv8f64(double, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
-declare double @llvm.vp.reduce.fadd.nxv16f64(double, <vscale x 16 x double>, <vscale x 16 x i1>, i32)
-
-declare <vscale x 1 x i32> @llvm.fshr.nxv4i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c)
-declare <vscale x 1 x i32> @llvm.fshl.nxv4i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c)
-
-declare <vscale x 4 x float> @llvm.pow.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
-declare <vscale x 4 x float> @llvm.powi.nxv4f32.i32(<vscale x 4 x float>, i32)
-declare <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float>)
-
-declare <2 x i8> @llvm.vp.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>, <2 x i1>, i32)
-declare <4 x i8> @llvm.vp.fshr.v4i8(<4 x i8>, <4 x i8>, <4 x i8>, <4 x i1>, i32)
-declare <8 x i8> @llvm.vp.fshr.v8i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i1>, i32)
-declare <16 x i8> @llvm.vp.fshr.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i1>, i32)
-declare <vscale x 1 x i8> @llvm.vp.fshr.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.vp.fshr.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i8> @llvm.vp.fshr.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i8> @llvm.vp.fshr.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i8> @llvm.vp.fshr.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
-declare <vscale x 32 x i8> @llvm.vp.fshr.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
-declare <vscale x 64 x i8> @llvm.vp.fshr.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
-declare <2 x i16> @llvm.vp.fshr.v2i16(<2 x i16>, <2 x i16>, <2 x i16>, <2 x i1>, i32)
-declare <4 x i16> @llvm.vp.fshr.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i1>, i32)
-declare <8 x i16> @llvm.vp.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32)
-declare <16 x i16> @llvm.vp.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>, <16 x i1>, i32)
-declare <vscale x 1 x i16> @llvm.vp.fshr.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i16> @llvm.vp.fshr.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i16> @llvm.vp.fshr.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i16> @llvm.vp.fshr.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i16> @llvm.vp.fshr.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
-declare <vscale x 32 x i16> @llvm.vp.fshr.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
-declare <2 x i32> @llvm.vp.fshr.v2i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i1>, i32)
-declare <4 x i32> @llvm.vp.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i1>, i32)
-declare <8 x i32> @llvm.vp.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>, <8 x i1>, i32)
-declare <16 x i32> @llvm.vp.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>, <16 x i1>, i32)
-declare <vscale x 1 x i32> @llvm.vp.fshr.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i32> @llvm.vp.fshr.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i32> @llvm.vp.fshr.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i32> @llvm.vp.fshr.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i32> @llvm.vp.fshr.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
-declare <2 x i64> @llvm.vp.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i1>, i32)
-declare <4 x i64> @llvm.vp.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i1>, i32)
-declare <8 x i64> @llvm.vp.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i1>, i32)
-declare <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64>, <16 x i64>, <16 x i64>, <16 x i1>, i32)
-declare <vscale x 1 x i64> @llvm.vp.fshr.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.vp.fshr.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.vp.fshr.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.vp.fshr.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
-
-declare <2 x i8> @llvm.vp.fshl.v2i8(<2 x i8>, <2 x i8>, <2 x i8>, <2 x i1>, i32)
-declare <4 x i8> @llvm.vp.fshl.v4i8(<4 x i8>, <4 x i8>, <4 x i8>, <4 x i1>, i32)
-declare <8 x i8> @llvm.vp.fshl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i1>, i32)
-declare <16 x i8> @llvm.vp.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i1>, i32)
-declare <vscale x 1 x i8> @llvm.vp.fshl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i8> @llvm.vp.fshl.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i8> @llvm.vp.fshl.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i8> @llvm.vp.fshl.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i8> @llvm.vp.fshl.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
-declare <vscale x 32 x i8> @llvm.vp.fshl.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
-declare <vscale x 64 x i8> @llvm.vp.fshl.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
-declare <2 x i16> @llvm.vp.fshl.v2i16(<2 x i16>, <2 x i16>, <2 x i16>, <2 x i1>, i32)
-declare <4 x i16> @llvm.vp.fshl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i1>, i32)
-declare <8 x i16> @llvm.vp.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32)
-declare <16 x i16> @llvm.vp.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>, <16 x i1>, i32)
-declare <vscale x 1 x i16> @llvm.vp.fshl.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i16> @llvm.vp.fshl.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i16> @llvm.vp.fshl.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i16> @llvm.vp.fshl.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i16> @llvm.vp.fshl.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
-declare <vscale x 32 x i16> @llvm.vp.fshl.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
-declare <2 x i32> @llvm.vp.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i1>, i32)
-declare <4 x i32> @llvm.vp.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i1>, i32)
-declare <8 x i32> @llvm.vp.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>, <8 x i1>, i32)
-declare <16 x i32> @llvm.vp.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>, <16 x i1>, i32)
-declare <vscale x 1 x i32> @llvm.vp.fshl.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i32> @llvm.vp.fshl.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i32> @llvm.vp.fshl.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i32> @llvm.vp.fshl.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x i32> @llvm.vp.fshl.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
-declare <2 x i64> @llvm.vp.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i1>, i32)
-declare <4 x i64> @llvm.vp.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i1>, i32)
-declare <8 x i64> @llvm.vp.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i1>, i32)
-declare <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64>, <16 x i64>, <16 x i64>, <16 x i1>, i32)
-declare <vscale x 1 x i64> @llvm.vp.fshl.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x i64> @llvm.vp.fshl.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x i64> @llvm.vp.fshl.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x i64> @llvm.vp.fshl.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
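To illustrate why all of the declares above are removable: the .ll parser now materializes intrinsic declarations on demand, reconstructing the signature from the mangled name at the call site. A minimal sketch in the style of these cost-model tests follows; the function name and RUN line are illustrative, not taken from the patch:

    ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v
    ; No declare needed: the parser infers the declaration of
    ; @llvm.abs.v2i64 from the mangled name at the call site.
    define <2 x i64> @abs_v2i64(<2 x i64> %v) {
      %r = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %v, i1 false)
      ret <2 x i64> %r
    }

Dropping the declares therefore changes nothing about what the tests exercise, which is why the patch is NFC.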