[llvm] 4006928 - [RISCV][CostModel] Add test coverage for all the vectorizable binary intrinsics
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 25 08:56:46 PDT 2022
Author: Philip Reames
Date: 2022-08-25T08:56:02-07:00
New Revision: 400692866951f260b581e2ab1955b4e469652a1c
URL: https://github.com/llvm/llvm-project/commit/400692866951f260b581e2ab1955b4e469652a1c
DIFF: https://github.com/llvm/llvm-project/commit/400692866951f260b581e2ab1955b4e469652a1c.diff
LOG: [RISCV][CostModel] Add test coverage for all the vectorizable binary intrinsics
Added:
llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll
llvm/test/Analysis/CostModel/RISCV/fp-sqrt-pow.ll
llvm/test/Analysis/CostModel/RISCV/fp-trig-log-exp.ll
llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll
Modified:
Removed:
llvm/test/Analysis/CostModel/RISCV/integer-bit-manip.ll
llvm/test/Analysis/CostModel/RISCV/sqrt.ll
llvm/test/Analysis/CostModel/RISCV/trig-log-exp.ll
################################################################################
diff --git a/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll b/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll
new file mode 100644
index 0000000000000..742e1c0853c74
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll
@@ -0,0 +1,374 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d -riscv-v-vector-bits-min=-1 | FileCheck %s
+
+define void @fabs() {
+; CHECK-LABEL: 'fabs'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.fabs.f32(float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x float> @llvm.fabs.v2f32(<2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x float> @llvm.fabs.v8f32(<8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x float> @llvm.fabs.v16f32(<16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call double @llvm.fabs.f64(double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x double> @llvm.fabs.v2f64(<2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x double> @llvm.fabs.v4f64(<4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <16 x double> @llvm.fabs.v16f64(<16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call float @llvm.fabs.f32(float undef)
+ call <2 x float> @llvm.fabs.v2f32(<2 x float> undef)
+ call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
+ call <8 x float> @llvm.fabs.v8f32(<8 x float> undef)
+ call <16 x float> @llvm.fabs.v16f32(<16 x float> undef)
+ call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> undef)
+ call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> undef)
+ call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> undef)
+ call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> undef)
+ call double @llvm.fabs.f64(double undef)
+ call <2 x double> @llvm.fabs.v2f64(<2 x double> undef)
+ call <4 x double> @llvm.fabs.v4f64(<4 x double> undef)
+ call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
+ call <16 x double> @llvm.fabs.v16f64(<16 x double> undef)
+ call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> undef)
+ call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> undef)
+ call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> undef)
+ call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> undef)
+ ret void
+}
+
+define void @minnum() {
+; CHECK-LABEL: 'minnum'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.minnum.f32(float undef, float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x float> @llvm.minnum.v2f32(<2 x float> undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x float> @llvm.minnum.v4f32(<4 x float> undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x float> @llvm.minnum.v8f32(<8 x float> undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x float> @llvm.minnum.v16f32(<16 x float> undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x float> @llvm.minnum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x float> @llvm.minnum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call double @llvm.minnum.f64(double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call <2 x double> @llvm.minnum.v2f64(<2 x double> undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %12 = call <4 x double> @llvm.minnum.v4f64(<4 x double> undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <8 x double> @llvm.minnum.v8f64(<8 x double> undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %14 = call <16 x double> @llvm.minnum.v16f64(<16 x double> undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 1 x double> @llvm.minnum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 4 x double> @llvm.minnum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 8 x double> @llvm.minnum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call float @llvm.minnum.f32(float undef, float undef)
+ call <2 x float> @llvm.minnum.v2f32(<2 x float> undef, <2 x float> undef)
+ call <4 x float> @llvm.minnum.v4f32(<4 x float> undef, <4 x float> undef)
+ call <8 x float> @llvm.minnum.v8f32(<8 x float> undef, <8 x float> undef)
+ call <16 x float> @llvm.minnum.v16f32(<16 x float> undef, <16 x float> undef)
+ call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ call <vscale x 8 x float> @llvm.minnum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+ call <vscale x 16 x float> @llvm.minnum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+ call double @llvm.minnum.f64(double undef, double undef)
+ call <2 x double> @llvm.minnum.v2f64(<2 x double> undef, <2 x double> undef)
+ call <4 x double> @llvm.minnum.v4f64(<4 x double> undef, <4 x double> undef)
+ call <8 x double> @llvm.minnum.v8f64(<8 x double> undef, <8 x double> undef)
+ call <16 x double> @llvm.minnum.v16f64(<16 x double> undef, <16 x double> undef)
+ call <vscale x 1 x double> @llvm.minnum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+ call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ call <vscale x 4 x double> @llvm.minnum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+ call <vscale x 8 x double> @llvm.minnum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+ ret void
+}
+
+define void @maxnum() {
+; CHECK-LABEL: 'maxnum'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.maxnum.f32(float undef, float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x float> @llvm.maxnum.v2f32(<2 x float> undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x float> @llvm.maxnum.v4f32(<4 x float> undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x float> @llvm.maxnum.v8f32(<8 x float> undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x float> @llvm.maxnum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x float> @llvm.maxnum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call double @llvm.maxnum.f64(double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call <2 x double> @llvm.maxnum.v2f64(<2 x double> undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %12 = call <4 x double> @llvm.maxnum.v4f64(<4 x double> undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %14 = call <16 x double> @llvm.maxnum.v16f64(<16 x double> undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 1 x double> @llvm.maxnum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 4 x double> @llvm.maxnum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 8 x double> @llvm.maxnum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call float @llvm.maxnum.f32(float undef, float undef)
+ call <2 x float> @llvm.maxnum.v2f32(<2 x float> undef, <2 x float> undef)
+ call <4 x float> @llvm.maxnum.v4f32(<4 x float> undef, <4 x float> undef)
+ call <8 x float> @llvm.maxnum.v8f32(<8 x float> undef, <8 x float> undef)
+ call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef, <16 x float> undef)
+ call <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ call <vscale x 8 x float> @llvm.maxnum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+ call <vscale x 16 x float> @llvm.maxnum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+ call double @llvm.maxnum.f64(double undef, double undef)
+ call <2 x double> @llvm.maxnum.v2f64(<2 x double> undef, <2 x double> undef)
+ call <4 x double> @llvm.maxnum.v4f64(<4 x double> undef, <4 x double> undef)
+ call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef, <8 x double> undef)
+ call <16 x double> @llvm.maxnum.v16f64(<16 x double> undef, <16 x double> undef)
+ call <vscale x 1 x double> @llvm.maxnum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+ call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ call <vscale x 4 x double> @llvm.maxnum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+ call <vscale x 8 x double> @llvm.maxnum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+ ret void
+}
+
+define void @minimum() {
+; CHECK-LABEL: 'minimum'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %1 = call float @llvm.minimum.f32(float undef, float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %2 = call <2 x float> @llvm.minimum.v2f32(<2 x float> undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %3 = call <4 x float> @llvm.minimum.v4f32(<4 x float> undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %4 = call <8 x float> @llvm.minimum.v8f32(<8 x float> undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %5 = call <16 x float> @llvm.minimum.v16f32(<16 x float> undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x float> @llvm.minimum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x float> @llvm.minimum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %10 = call double @llvm.minimum.f64(double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %11 = call <2 x double> @llvm.minimum.v2f64(<2 x double> undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %12 = call <4 x double> @llvm.minimum.v4f64(<4 x double> undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %13 = call <8 x double> @llvm.minimum.v8f64(<8 x double> undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %14 = call <16 x double> @llvm.minimum.v16f64(<16 x double> undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 1 x double> @llvm.minimum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 4 x double> @llvm.minimum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 8 x double> @llvm.minimum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call float @llvm.minimum.f32(float undef, float undef)
+ call <2 x float> @llvm.minimum.v2f32(<2 x float> undef, <2 x float> undef)
+ call <4 x float> @llvm.minimum.v4f32(<4 x float> undef, <4 x float> undef)
+ call <8 x float> @llvm.minimum.v8f32(<8 x float> undef, <8 x float> undef)
+ call <16 x float> @llvm.minimum.v16f32(<16 x float> undef, <16 x float> undef)
+ call <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ call <vscale x 8 x float> @llvm.minimum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+ call <vscale x 16 x float> @llvm.minimum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+ call double @llvm.minimum.f64(double undef, double undef)
+ call <2 x double> @llvm.minimum.v2f64(<2 x double> undef, <2 x double> undef)
+ call <4 x double> @llvm.minimum.v4f64(<4 x double> undef, <4 x double> undef)
+ call <8 x double> @llvm.minimum.v8f64(<8 x double> undef, <8 x double> undef)
+ call <16 x double> @llvm.minimum.v16f64(<16 x double> undef, <16 x double> undef)
+ call <vscale x 1 x double> @llvm.minimum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+ call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ call <vscale x 4 x double> @llvm.minimum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+ call <vscale x 8 x double> @llvm.minimum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+ ret void
+}
+
+define void @maximum() {
+; CHECK-LABEL: 'maximum'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %1 = call float @llvm.maximum.f32(float undef, float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %2 = call <2 x float> @llvm.maximum.v2f32(<2 x float> undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %3 = call <4 x float> @llvm.maximum.v4f32(<4 x float> undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %4 = call <8 x float> @llvm.maximum.v8f32(<8 x float> undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %5 = call <16 x float> @llvm.maximum.v16f32(<16 x float> undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x float> @llvm.maximum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x float> @llvm.maximum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x float> @llvm.maximum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %10 = call double @llvm.maximum.f64(double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %11 = call <2 x double> @llvm.maximum.v2f64(<2 x double> undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %12 = call <4 x double> @llvm.maximum.v4f64(<4 x double> undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %13 = call <8 x double> @llvm.maximum.v8f64(<8 x double> undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %14 = call <16 x double> @llvm.maximum.v16f64(<16 x double> undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 1 x double> @llvm.maximum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 4 x double> @llvm.maximum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 8 x double> @llvm.maximum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call float @llvm.maximum.f32(float undef, float undef)
+ call <2 x float> @llvm.maximum.v2f32(<2 x float> undef, <2 x float> undef)
+ call <4 x float> @llvm.maximum.v4f32(<4 x float> undef, <4 x float> undef)
+ call <8 x float> @llvm.maximum.v8f32(<8 x float> undef, <8 x float> undef)
+ call <16 x float> @llvm.maximum.v16f32(<16 x float> undef, <16 x float> undef)
+ call <vscale x 2 x float> @llvm.maximum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ call <vscale x 8 x float> @llvm.maximum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+ call <vscale x 16 x float> @llvm.maximum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+ call double @llvm.maximum.f64(double undef, double undef)
+ call <2 x double> @llvm.maximum.v2f64(<2 x double> undef, <2 x double> undef)
+ call <4 x double> @llvm.maximum.v4f64(<4 x double> undef, <4 x double> undef)
+ call <8 x double> @llvm.maximum.v8f64(<8 x double> undef, <8 x double> undef)
+ call <16 x double> @llvm.maximum.v16f64(<16 x double> undef, <16 x double> undef)
+ call <vscale x 1 x double> @llvm.maximum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+ call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ call <vscale x 4 x double> @llvm.maximum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+ call <vscale x 8 x double> @llvm.maximum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+ ret void
+}
+
+define void @copysign() {
+; CHECK-LABEL: 'copysign'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.copysign.f32(float undef, float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x float> @llvm.copysign.v2f32(<2 x float> undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x float> @llvm.copysign.v8f32(<8 x float> undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x float> @llvm.copysign.v16f32(<16 x float> undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call double @llvm.copysign.f64(double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call <2 x double> @llvm.copysign.v2f64(<2 x double> undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %12 = call <4 x double> @llvm.copysign.v4f64(<4 x double> undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <8 x double> @llvm.copysign.v8f64(<8 x double> undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %14 = call <16 x double> @llvm.copysign.v16f64(<16 x double> undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call float @llvm.copysign.f32(float undef, float undef)
+ call <2 x float> @llvm.copysign.v2f32(<2 x float> undef, <2 x float> undef)
+ call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
+ call <8 x float> @llvm.copysign.v8f32(<8 x float> undef, <8 x float> undef)
+ call <16 x float> @llvm.copysign.v16f32(<16 x float> undef, <16 x float> undef)
+ call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+ call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+ call double @llvm.copysign.f64(double undef, double undef)
+ call <2 x double> @llvm.copysign.v2f64(<2 x double> undef, <2 x double> undef)
+ call <4 x double> @llvm.copysign.v4f64(<4 x double> undef, <4 x double> undef)
+ call <8 x double> @llvm.copysign.v8f64(<8 x double> undef, <8 x double> undef)
+ call <16 x double> @llvm.copysign.v16f64(<16 x double> undef, <16 x double> undef)
+ call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+ call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+ call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+ ret void
+}
+
+declare float @llvm.fabs.f32(float)
+declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
+declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
+declare <8 x float> @llvm.fabs.v8f32(<8 x float>)
+declare <16 x float> @llvm.fabs.v16f32(<16 x float>)
+declare <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float>)
+declare <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float>)
+declare double @llvm.fabs.f64(double)
+declare <2 x double> @llvm.fabs.v2f64(<2 x double>)
+declare <4 x double> @llvm.fabs.v4f64(<4 x double>)
+declare <8 x double> @llvm.fabs.v8f64(<8 x double>)
+declare <16 x double> @llvm.fabs.v16f64(<16 x double>)
+declare <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double>)
+declare <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double>)
+declare <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double>)
+declare <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double>)
+
+declare float @llvm.minnum.f32(float, float)
+declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
+declare <8 x float> @llvm.minnum.v8f32(<8 x float>, <8 x float>)
+declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>)
+declare <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x float> @llvm.minnum.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
+declare <vscale x 16 x float> @llvm.minnum.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>)
+declare double @llvm.minnum.f64(double, double)
+declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
+declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>)
+declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)
+declare <16 x double> @llvm.minnum.v16f64(<16 x double>, <16 x double>)
+declare <vscale x 1 x double> @llvm.minnum.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>)
+declare <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x double> @llvm.minnum.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)
+declare <vscale x 8 x double> @llvm.minnum.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>)
+
+declare float @llvm.maxnum.f32(float, float)
+declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
+declare <8 x float> @llvm.maxnum.v8f32(<8 x float>, <8 x float>)
+declare <16 x float> @llvm.maxnum.v16f32(<16 x float>, <16 x float>)
+declare <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x float> @llvm.maxnum.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
+declare <vscale x 16 x float> @llvm.maxnum.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>)
+declare double @llvm.maxnum.f64(double, double)
+declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>)
+declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>)
+declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)
+declare <16 x double> @llvm.maxnum.v16f64(<16 x double>, <16 x double>)
+declare <vscale x 1 x double> @llvm.maxnum.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>)
+declare <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x double> @llvm.maxnum.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)
+declare <vscale x 8 x double> @llvm.maxnum.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>)
+
+declare float @llvm.minimum.f32(float, float)
+declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.minimum.v4f32(<4 x float>, <4 x float>)
+declare <8 x float> @llvm.minimum.v8f32(<8 x float>, <8 x float>)
+declare <16 x float> @llvm.minimum.v16f32(<16 x float>, <16 x float>)
+declare <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x float> @llvm.minimum.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
+declare <vscale x 16 x float> @llvm.minimum.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>)
+declare double @llvm.minimum.f64(double, double)
+declare <2 x double> @llvm.minimum.v2f64(<2 x double>, <2 x double>)
+declare <4 x double> @llvm.minimum.v4f64(<4 x double>, <4 x double>)
+declare <8 x double> @llvm.minimum.v8f64(<8 x double>, <8 x double>)
+declare <16 x double> @llvm.minimum.v16f64(<16 x double>, <16 x double>)
+declare <vscale x 1 x double> @llvm.minimum.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>)
+declare <vscale x 2 x double> @llvm.minimum.nvx2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x double> @llvm.minimum.nvx4f64(<vscale x 4 x double>, <vscale x 4 x double>)
+declare <vscale x 8 x double> @llvm.minimum.nvx8f64(<vscale x 8 x double>, <vscale x 8 x double>)
+
+declare float @llvm.maximum.f32(float, float)
+declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.maximum.v4f32(<4 x float>, <4 x float>)
+declare <8 x float> @llvm.maximum.v8f32(<8 x float>, <8 x float>)
+declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>)
+declare <vscale x 2 x float> @llvm.maximum.nvx2f32(<vscale x 2 x float>, <vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.maximum.nvx4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x float> @llvm.maximum.nvx8f32(<vscale x 8 x float>, <vscale x 8 x float>)
+declare <vscale x 16 x float> @llvm.maximum.nvx16f32(<vscale x 16 x float>, <vscale x 16 x float>)
+declare double @llvm.maximum.f64(double, double)
+declare <2 x double> @llvm.maximum.v2f64(<2 x double>, <2 x double>)
+declare <4 x double> @llvm.maximum.v4f64(<4 x double>, <4 x double>)
+declare <8 x double> @llvm.maximum.v8f64(<8 x double>, <8 x double>)
+declare <16 x double> @llvm.maximum.v16f64(<16 x double>, <16 x double>)
+declare <vscale x 1 x double> @llvm.maximum.nvx1f64(<vscale x 1 x double>, <vscale x 1 x double>)
+declare <vscale x 2 x double> @llvm.maximum.nvx2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x double> @llvm.maximum.nvx4f64(<vscale x 4 x double>, <vscale x 4 x double>)
+declare <vscale x 8 x double> @llvm.maximum.nvx8f64(<vscale x 8 x double>, <vscale x 8 x double>)
+
+declare float @llvm.copysign.f32(float, float)
+declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)
+declare <8 x float> @llvm.copysign.v8f32(<8 x float>, <8 x float>)
+declare <16 x float> @llvm.copysign.v16f32(<16 x float>, <16 x float>)
+declare <vscale x 2 x float> @llvm.copysign.nvx2f32(<vscale x 2 x float>, <vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.copysign.nvx4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x float> @llvm.copysign.nvx8f32(<vscale x 8 x float>, <vscale x 8 x float>)
+declare <vscale x 16 x float> @llvm.copysign.nvx16f32(<vscale x 16 x float>, <vscale x 16 x float>)
+declare double @llvm.copysign.f64(double, double)
+declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)
+declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>)
+declare <8 x double> @llvm.copysign.v8f64(<8 x double>, <8 x double>)
+declare <16 x double> @llvm.copysign.v16f64(<16 x double>, <16 x double>)
+declare <vscale x 1 x double> @llvm.copysign.nvx1f64(<vscale x 1 x double>, <vscale x 1 x double>)
+declare <vscale x 2 x double> @llvm.copysign.nvx2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x double> @llvm.copysign.nvx4f64(<vscale x 4 x double>, <vscale x 4 x double>)
+declare <vscale x 8 x double> @llvm.copysign.nvx8f64(<vscale x 8 x double>, <vscale x 8 x double>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/fp-sqrt-pow.ll b/llvm/test/Analysis/CostModel/RISCV/fp-sqrt-pow.ll
new file mode 100644
index 0000000000000..b492facb97f71
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/fp-sqrt-pow.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d -riscv-v-vector-bits-min=-1 | FileCheck %s
+
+define void @sqrt() {
+; CHECK-LABEL: 'sqrt'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.sqrt.f32(float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x float> @llvm.sqrt.v2f32(<2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x float> @llvm.sqrt.v8f32(<8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x float> @llvm.sqrt.nxv2f32(<vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x float> @llvm.sqrt.nxv8f32(<vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x float> @llvm.sqrt.nxv16f32(<vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call double @llvm.sqrt.f64(double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x double> @llvm.sqrt.v2f64(<2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x double> @llvm.sqrt.v4f64(<4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <16 x double> @llvm.sqrt.v16f64(<16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 1 x double> @llvm.sqrt.nxv1f64(<vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 4 x double> @llvm.sqrt.nxv4f64(<vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 8 x double> @llvm.sqrt.nxv8f64(<vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call float @llvm.sqrt.f32(float undef)
+ call <2 x float> @llvm.sqrt.v2f32(<2 x float> undef)
+ call <4 x float> @llvm.sqrt.v4f32(<4 x float> undef)
+ call <8 x float> @llvm.sqrt.v8f32(<8 x float> undef)
+ call <16 x float> @llvm.sqrt.v16f32(<16 x float> undef)
+ call <vscale x 2 x float> @llvm.sqrt.nvx2f32(<vscale x 2 x float> undef)
+ call <vscale x 4 x float> @llvm.sqrt.nvx4f32(<vscale x 4 x float> undef)
+ call <vscale x 8 x float> @llvm.sqrt.nvx8f32(<vscale x 8 x float> undef)
+ call <vscale x 16 x float> @llvm.sqrt.nvx16f32(<vscale x 16 x float> undef)
+ call double @llvm.sqrt.f64(double undef)
+ call <2 x double> @llvm.sqrt.v2f64(<2 x double> undef)
+ call <4 x double> @llvm.sqrt.v4f64(<4 x double> undef)
+ call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
+ call <16 x double> @llvm.sqrt.v16f64(<16 x double> undef)
+ call <vscale x 1 x double> @llvm.sqrt.nvx1f64(<vscale x 1 x double> undef)
+ call <vscale x 2 x double> @llvm.sqrt.nvx2f64(<vscale x 2 x double> undef)
+ call <vscale x 4 x double> @llvm.sqrt.nvx4f64(<vscale x 4 x double> undef)
+ call <vscale x 8 x double> @llvm.sqrt.nvx8f64(<vscale x 8 x double> undef)
+ ret void
+}
+
+declare float @llvm.sqrt.f32(float)
+declare <2 x float> @llvm.sqrt.v2f32(<2 x float>)
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
+declare <8 x float> @llvm.sqrt.v8f32(<8 x float>)
+declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
+declare <vscale x 2 x float> @llvm.sqrt.nvx2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.sqrt.nvx4f32(<vscale x 4 x float>)
+declare <vscale x 8 x float> @llvm.sqrt.nvx8f32(<vscale x 8 x float>)
+declare <vscale x 16 x float> @llvm.sqrt.nvx16f32(<vscale x 16 x float>)
+declare double @llvm.sqrt.f64(double)
+declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)
+declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
+declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
+declare <16 x double> @llvm.sqrt.v16f64(<16 x double>)
+declare <vscale x 1 x double> @llvm.sqrt.nvx1f64(<vscale x 1 x double>)
+declare <vscale x 2 x double> @llvm.sqrt.nvx2f64(<vscale x 2 x double>)
+declare <vscale x 4 x double> @llvm.sqrt.nvx4f64(<vscale x 4 x double>)
+declare <vscale x 8 x double> @llvm.sqrt.nvx8f64(<vscale x 8 x double>)
+
+define void @pow() {
+; CHECK-LABEL: 'pow'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %1 = call float @llvm.pow.f32(float undef, float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %2 = call <2 x float> @llvm.pow.v2f32(<2 x float> undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %3 = call <4 x float> @llvm.pow.v4f32(<4 x float> undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %4 = call <8 x float> @llvm.pow.v8f32(<8 x float> undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %5 = call <16 x float> @llvm.pow.v16f32(<16 x float> undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x float> @llvm.pow.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x float> @llvm.pow.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x float> @llvm.pow.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x float> @llvm.pow.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %10 = call double @llvm.pow.f64(double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %11 = call <2 x double> @llvm.pow.v2f64(<2 x double> undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %12 = call <4 x double> @llvm.pow.v4f64(<4 x double> undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %13 = call <8 x double> @llvm.pow.v8f64(<8 x double> undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %14 = call <16 x double> @llvm.pow.v16f64(<16 x double> undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 1 x double> @llvm.pow.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 2 x double> @llvm.pow.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 4 x double> @llvm.pow.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 8 x double> @llvm.pow.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call float @llvm.pow.f32(float undef, float undef)
+ call <2 x float> @llvm.pow.v2f32(<2 x float> undef, <2 x float> undef)
+ call <4 x float> @llvm.pow.v4f32(<4 x float> undef, <4 x float> undef)
+ call <8 x float> @llvm.pow.v8f32(<8 x float> undef, <8 x float> undef)
+ call <16 x float> @llvm.pow.v16f32(<16 x float> undef, <16 x float> undef)
+ call <vscale x 2 x float> @llvm.pow.nvx2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ call <vscale x 4 x float> @llvm.pow.nvx4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ call <vscale x 8 x float> @llvm.pow.nvx8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+ call <vscale x 16 x float> @llvm.pow.nvx16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+ call double @llvm.pow.f64(double undef, double undef)
+ call <2 x double> @llvm.pow.v2f64(<2 x double> undef, <2 x double> undef)
+ call <4 x double> @llvm.pow.v4f64(<4 x double> undef, <4 x double> undef)
+ call <8 x double> @llvm.pow.v8f64(<8 x double> undef, <8 x double> undef)
+ call <16 x double> @llvm.pow.v16f64(<16 x double> undef, <16 x double> undef)
+ call <vscale x 1 x double> @llvm.pow.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+ call <vscale x 2 x double> @llvm.pow.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ call <vscale x 4 x double> @llvm.pow.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+ call <vscale x 8 x double> @llvm.pow.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+ ret void
+}
+
+declare float @llvm.pow.f32(float, float)
+declare <2 x float> @llvm.pow.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
+declare <8 x float> @llvm.pow.v8f32(<8 x float>, <8 x float>)
+declare <16 x float> @llvm.pow.v16f32(<16 x float>, <16 x float>)
+declare <vscale x 2 x float> @llvm.pow.nvx2f32(<vscale x 2 x float>, <vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.pow.nvx4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x float> @llvm.pow.nvx8f32(<vscale x 8 x float>, <vscale x 8 x float>)
+declare <vscale x 16 x float> @llvm.pow.nvx16f32(<vscale x 16 x float>, <vscale x 16 x float>)
+declare double @llvm.pow.f64(double, double)
+declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
+declare <4 x double> @llvm.pow.v4f64(<4 x double>, <4 x double>)
+declare <8 x double> @llvm.pow.v8f64(<8 x double>, <8 x double>)
+declare <16 x double> @llvm.pow.v16f64(<16 x double>, <16 x double>)
+declare <vscale x 1 x double> @llvm.pow.nvx1f64(<vscale x 1 x double>, <vscale x 1 x double>)
+declare <vscale x 2 x double> @llvm.pow.nvx2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x double> @llvm.pow.nvx4f64(<vscale x 4 x double>, <vscale x 4 x double>)
+declare <vscale x 8 x double> @llvm.pow.nvx8f64(<vscale x 8 x double>, <vscale x 8 x double>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/trig-log-exp.ll b/llvm/test/Analysis/CostModel/RISCV/fp-trig-log-exp.ll
similarity index 87%
rename from llvm/test/Analysis/CostModel/RISCV/trig-log-exp.ll
rename to llvm/test/Analysis/CostModel/RISCV/fp-trig-log-exp.ll
index 21a9bf1900ac8..ff3ee747c0e54 100644
--- a/llvm/test/Analysis/CostModel/RISCV/trig-log-exp.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fp-trig-log-exp.ll
@@ -302,49 +302,6 @@ define void @log2() {
ret void
}
-define void @fabs() {
-; CHECK-LABEL: 'fabs'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.fabs.f32(float undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x float> @llvm.fabs.v2f32(<2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x float> @llvm.fabs.v8f32(<8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x float> @llvm.fabs.v16f32(<16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call double @llvm.fabs.f64(double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x double> @llvm.fabs.v2f64(<2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x double> @llvm.fabs.v4f64(<4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <16 x double> @llvm.fabs.v16f64(<16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- call float @llvm.fabs.f32(float undef)
- call <2 x float> @llvm.fabs.v2f32(<2 x float> undef)
- call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
- call <8 x float> @llvm.fabs.v8f32(<8 x float> undef)
- call <16 x float> @llvm.fabs.v16f32(<16 x float> undef)
- call <vscale x 2 x float> @llvm.fabs.nvx2f32(<vscale x 2 x float> undef)
- call <vscale x 4 x float> @llvm.fabs.nvx4f32(<vscale x 4 x float> undef)
- call <vscale x 8 x float> @llvm.fabs.nvx8f32(<vscale x 8 x float> undef)
- call <vscale x 16 x float> @llvm.fabs.nvx16f32(<vscale x 16 x float> undef)
- call double @llvm.fabs.f64(double undef)
- call <2 x double> @llvm.fabs.v2f64(<2 x double> undef)
- call <4 x double> @llvm.fabs.v4f64(<4 x double> undef)
- call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
- call <16 x double> @llvm.fabs.v16f64(<16 x double> undef)
- call <vscale x 1 x double> @llvm.fabs.nvx1f64(<vscale x 1 x double> undef)
- call <vscale x 2 x double> @llvm.fabs.nvx2f64(<vscale x 2 x double> undef)
- call <vscale x 4 x double> @llvm.fabs.nvx4f64(<vscale x 4 x double> undef)
- call <vscale x 8 x double> @llvm.fabs.nvx8f64(<vscale x 8 x double> undef)
- ret void
-}
-
declare float @llvm.sin.f32(float)
declare <2 x float> @llvm.sin.v2f32(<2 x float>)
declare <4 x float> @llvm.sin.v4f32(<4 x float>)
@@ -477,22 +434,3 @@ declare <vscale x 1 x double> @llvm.log2.nvx1f64(<vscale x 1 x double>)
declare <vscale x 2 x double> @llvm.log2.nvx2f64(<vscale x 2 x double>)
declare <vscale x 4 x double> @llvm.log2.nvx4f64(<vscale x 4 x double>)
declare <vscale x 8 x double> @llvm.log2.nvx8f64(<vscale x 8 x double>)
-
-declare float @llvm.fabs.f32(float)
-declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
-declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
-declare <8 x float> @llvm.fabs.v8f32(<8 x float>)
-declare <16 x float> @llvm.fabs.v16f32(<16 x float>)
-declare <vscale x 2 x float> @llvm.fabs.nvx2f32(<vscale x 2 x float>)
-declare <vscale x 4 x float> @llvm.fabs.nvx4f32(<vscale x 4 x float>)
-declare <vscale x 8 x float> @llvm.fabs.nvx8f32(<vscale x 8 x float>)
-declare <vscale x 16 x float> @llvm.fabs.nvx16f32(<vscale x 16 x float>)
-declare double @llvm.fabs.f64(double)
-declare <2 x double> @llvm.fabs.v2f64(<2 x double>)
-declare <4 x double> @llvm.fabs.v4f64(<4 x double>)
-declare <8 x double> @llvm.fabs.v8f64(<8 x double>)
-declare <16 x double> @llvm.fabs.v16f64(<16 x double>)
-declare <vscale x 1 x double> @llvm.fabs.nvx1f64(<vscale x 1 x double>)
-declare <vscale x 2 x double> @llvm.fabs.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.fabs.nvx4f64(<vscale x 4 x double>)
-declare <vscale x 8 x double> @llvm.fabs.nvx8f64(<vscale x 8 x double>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/integer-bit-manip.ll b/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
similarity index 100%
rename from llvm/test/Analysis/CostModel/RISCV/integer-bit-manip.ll
rename to llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
diff --git a/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll b/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
new file mode 100644
index 0000000000000..d05e3f0d4cdb6
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/int-min-max.ll
@@ -0,0 +1,454 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+define void @smax() {
+; CHECK-LABEL: 'smax'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.smax.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %2 = call <2 x i8> @llvm.smax.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %3 = call <4 x i8> @llvm.smax.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %4 = call <8 x i8> @llvm.smax.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %5 = call <16 x i8> @llvm.smax.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 2 x i8> @llvm.smax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 4 x i8> @llvm.smax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 8 x i8> @llvm.smax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call i16 @llvm.smax.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %11 = call <2 x i16> @llvm.smax.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %12 = call <4 x i16> @llvm.smax.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %13 = call <8 x i16> @llvm.smax.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %14 = call <16 x i16> @llvm.smax.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <vscale x 2 x i16> @llvm.smax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %16 = call <vscale x 4 x i16> @llvm.smax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call i32 @llvm.smax.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %21 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %22 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %23 = call <16 x i32> @llvm.smax.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %25 = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %26 = call <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call i64 @llvm.smax.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %30 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %31 = call <8 x i64> @llvm.smax.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %32 = call <16 x i64> @llvm.smax.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %34 = call <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %35 = call <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.smax.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.smax.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.smax.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.smax.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.smax.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.smax.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.smax.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.smax.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.smax.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.smax.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.smax.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.smax.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.smax.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.smax.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.smax.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.smax.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.smax.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.smax.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.smax.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.smax.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.smax.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.smax.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.smax.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.smax.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.smax.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.smax.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.smax.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.smax.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.smax.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.smax.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.smax.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.smax.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.smax.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.smax.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.smax.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+define void @smin() {
+; CHECK-LABEL: 'smin'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.smin.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %2 = call <2 x i8> @llvm.smin.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %3 = call <4 x i8> @llvm.smin.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %4 = call <8 x i8> @llvm.smin.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %5 = call <16 x i8> @llvm.smin.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 2 x i8> @llvm.smin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 4 x i8> @llvm.smin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 8 x i8> @llvm.smin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call i16 @llvm.smin.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %11 = call <2 x i16> @llvm.smin.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %12 = call <4 x i16> @llvm.smin.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %13 = call <8 x i16> @llvm.smin.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %14 = call <16 x i16> @llvm.smin.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <vscale x 2 x i16> @llvm.smin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %16 = call <vscale x 4 x i16> @llvm.smin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call i32 @llvm.smin.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <2 x i32> @llvm.smin.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %21 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %22 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %23 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %25 = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %26 = call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call i64 @llvm.smin.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <2 x i64> @llvm.smin.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %30 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %31 = call <8 x i64> @llvm.smin.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %32 = call <16 x i64> @llvm.smin.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %34 = call <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %35 = call <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.smin.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.smin.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.smin.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.smin.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.smin.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.smin.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.smin.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.smin.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.smin.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.smin.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.smin.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.smin.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.smin.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.smin.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.smin.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.smin.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.smin.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.smin.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.smin.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.smin.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.smin.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.smin.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.smin.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.smin.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.smin.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.smin.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.smin.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.smin.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.smin.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.smin.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.smin.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.smin.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.smin.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.smin.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.smin.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+define void @umax() {
+; CHECK-LABEL: 'umax'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.umax.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %2 = call <2 x i8> @llvm.umax.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %3 = call <4 x i8> @llvm.umax.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %4 = call <8 x i8> @llvm.umax.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %5 = call <16 x i8> @llvm.umax.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 2 x i8> @llvm.umax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 4 x i8> @llvm.umax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 8 x i8> @llvm.umax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call i16 @llvm.umax.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %11 = call <2 x i16> @llvm.umax.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %12 = call <4 x i16> @llvm.umax.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %13 = call <8 x i16> @llvm.umax.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %14 = call <16 x i16> @llvm.umax.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <vscale x 2 x i16> @llvm.umax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %16 = call <vscale x 4 x i16> @llvm.umax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call i32 @llvm.umax.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <2 x i32> @llvm.umax.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %21 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %22 = call <8 x i32> @llvm.umax.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %23 = call <16 x i32> @llvm.umax.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <vscale x 2 x i32> @llvm.umax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %25 = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %26 = call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call i64 @llvm.umax.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %30 = call <4 x i64> @llvm.umax.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %31 = call <8 x i64> @llvm.umax.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %32 = call <16 x i64> @llvm.umax.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %34 = call <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %35 = call <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.umax.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.umax.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.umax.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.umax.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.umax.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.umax.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.umax.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.umax.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.umax.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.umax.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.umax.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.umax.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.umax.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.umax.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.umax.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.umax.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.umax.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.umax.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.umax.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.umax.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.umax.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.umax.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.umax.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.umax.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.umax.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.umax.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.umax.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.umax.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.umax.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.umax.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.umax.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.umax.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.umax.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.umax.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.umax.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+define void @umin() {
+; CHECK-LABEL: 'umin'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.umin.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %2 = call <2 x i8> @llvm.umin.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %3 = call <4 x i8> @llvm.umin.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %4 = call <8 x i8> @llvm.umin.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %5 = call <16 x i8> @llvm.umin.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 2 x i8> @llvm.umin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 4 x i8> @llvm.umin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 8 x i8> @llvm.umin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call i16 @llvm.umin.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %11 = call <2 x i16> @llvm.umin.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %12 = call <4 x i16> @llvm.umin.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %13 = call <8 x i16> @llvm.umin.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %14 = call <16 x i16> @llvm.umin.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <vscale x 2 x i16> @llvm.umin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %16 = call <vscale x 4 x i16> @llvm.umin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call i32 @llvm.umin.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <2 x i32> @llvm.umin.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %21 = call <4 x i32> @llvm.umin.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %22 = call <8 x i32> @llvm.umin.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %23 = call <16 x i32> @llvm.umin.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %25 = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %26 = call <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call i64 @llvm.umin.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <2 x i64> @llvm.umin.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %30 = call <4 x i64> @llvm.umin.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %31 = call <8 x i64> @llvm.umin.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %32 = call <16 x i64> @llvm.umin.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %34 = call <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %35 = call <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.umin.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.umin.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.umin.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.umin.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.umin.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.umin.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.umin.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.umin.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.umin.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.umin.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.umin.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.umin.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.umin.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.umin.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.umin.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.umin.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.umin.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.umin.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.umin.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.umin.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.umin.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.umin.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.umin.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.umin.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.umin.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.umin.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.umin.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.umin.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.umin.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.umin.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.umin.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.umin.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.umin.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.umin.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.umin.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+declare i8 @llvm.smax.i8(i8, i8)
+declare <2 x i8> @llvm.smax.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.smax.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.smax.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.smax.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.smax.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.smax.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.smax.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.smax.i16(i16, i16)
+declare <2 x i16> @llvm.smax.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.smax.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.smax.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.smax.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.smax.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.smax.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.smax.i32(i32, i32)
+declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.smax.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.smax.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.smax.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.smax.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.smax.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.smax.i64(i64, i64)
+declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.smax.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.smax.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.smax.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.smax.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.smax.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+declare i8 @llvm.smin.i8(i8, i8)
+declare <2 x i8> @llvm.smin.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.smin.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.smin.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.smin.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.smin.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.smin.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.smin.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.smin.i16(i16, i16)
+declare <2 x i16> @llvm.smin.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.smin.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.smin.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.smin.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.smin.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.smin.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.smin.i32(i32, i32)
+declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.smin.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.smin.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.smin.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.smin.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.smin.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.smin.i64(i64, i64)
+declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.smin.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.smin.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.smin.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.smin.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.smin.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+declare i8 @llvm.umax.i8(i8, i8)
+declare <2 x i8> @llvm.umax.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.umax.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.umax.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.umax.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.umax.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.umax.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.umax.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.umax.i16(i16, i16)
+declare <2 x i16> @llvm.umax.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.umax.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.umax.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.umax.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.umax.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.umax.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.umax.i32(i32, i32)
+declare <2 x i32> @llvm.umax.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.umax.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.umax.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.umax.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.umax.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.umax.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.umax.i64(i64, i64)
+declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.umax.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.umax.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.umax.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.umax.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.umax.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+declare i8 @llvm.umin.i8(i8, i8)
+declare <2 x i8> @llvm.umin.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.umin.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.umin.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.umin.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.umin.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.umin.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.umin.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.umin.i16(i16, i16)
+declare <2 x i16> @llvm.umin.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.umin.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.umin.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.umin.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.umin.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.umin.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.umin.i32(i32, i32)
+declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.umin.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.umin.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.umin.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.umin.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.umin.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.umin.i64(i64, i64)
+declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.umin.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.umin.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.umin.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.umin.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.umin.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll b/llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll
new file mode 100644
index 0000000000000..2ec80870d00d6
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll
@@ -0,0 +1,680 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+define void @sadd_sat() {
+; CHECK-LABEL: 'sadd_sat'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.sadd_sat.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.sadd_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.sadd_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.sadd_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.sadd_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.sadd_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.sadd_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.sadd_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.sadd_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.sadd_sat.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.sadd_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.sadd_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.sadd_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %14 = call <16 x i16> @llvm.sadd_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.sadd_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.sadd_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.sadd_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.sadd_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.sadd_sat.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.sadd_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.sadd_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %22 = call <8 x i32> @llvm.sadd_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %23 = call <16 x i32> @llvm.sadd_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.sadd_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.sadd_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.sadd_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.sadd_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.sadd_sat.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.sadd_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <4 x i64> @llvm.sadd_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %31 = call <8 x i64> @llvm.sadd_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %32 = call <16 x i64> @llvm.sadd_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.sadd_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.sadd_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.sadd_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.sadd_sat.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.sadd_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.sadd_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.sadd_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.sadd_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.sadd_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.sadd_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.sadd_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.sadd_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.sadd_sat.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.sadd_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.sadd_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.sadd_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.sadd_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.sadd_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.sadd_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.sadd_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.sadd_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.sadd_sat.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.sadd_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.sadd_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.sadd_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.sadd_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.sadd_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.sadd_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.sadd_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.sadd_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.sadd_sat.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.sadd_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.sadd_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.sadd_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.sadd_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.sadd_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.sadd_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.sadd_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+define void @uadd_sat() {
+; CHECK-LABEL: 'uadd_sat'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.uadd_sat.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.uadd_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.uadd_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.uadd_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.uadd_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.uadd_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.uadd_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.uadd_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.uadd_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.uadd_sat.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.uadd_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.uadd_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.uadd_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %14 = call <16 x i16> @llvm.uadd_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.uadd_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.uadd_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.uadd_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.uadd_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.uadd_sat.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.uadd_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.uadd_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %22 = call <8 x i32> @llvm.uadd_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %23 = call <16 x i32> @llvm.uadd_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.uadd_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.uadd_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.uadd_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.uadd_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.uadd_sat.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.uadd_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <4 x i64> @llvm.uadd_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %31 = call <8 x i64> @llvm.uadd_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %32 = call <16 x i64> @llvm.uadd_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.uadd_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.uadd_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.uadd_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.uadd_sat.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.uadd_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.uadd_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.uadd_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.uadd_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.uadd_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.uadd_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.uadd_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.uadd_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.uadd_sat.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.uadd_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.uadd_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.uadd_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.uadd_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.uadd_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.uadd_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.uadd_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.uadd_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.uadd_sat.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.uadd_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.uadd_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.uadd_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.uadd_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.uadd_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.uadd_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.uadd_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.uadd_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.uadd_sat.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.uadd_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.uadd_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.uadd_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.uadd_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.uadd_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.uadd_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.uadd_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+define void @usub_sat() {
+; CHECK-LABEL: 'usub_sat'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.usub_sat.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.usub_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.usub_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.usub_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.usub_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.usub_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.usub_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.usub_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.usub_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.usub_sat.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.usub_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.usub_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.usub_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %14 = call <16 x i16> @llvm.usub_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.usub_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.usub_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.usub_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.usub_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.usub_sat.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.usub_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.usub_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %22 = call <8 x i32> @llvm.usub_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %23 = call <16 x i32> @llvm.usub_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.usub_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.usub_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.usub_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.usub_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.usub_sat.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.usub_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <4 x i64> @llvm.usub_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %31 = call <8 x i64> @llvm.usub_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %32 = call <16 x i64> @llvm.usub_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.usub_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.usub_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.usub_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.usub_sat.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.usub_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.usub_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.usub_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.usub_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.usub_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.usub_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.usub_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.usub_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.usub_sat.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.usub_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.usub_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.usub_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.usub_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.usub_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.usub_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.usub_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.usub_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.usub_sat.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.usub_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.usub_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.usub_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.usub_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.usub_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.usub_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.usub_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.usub_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.usub_sat.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.usub_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.usub_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.usub_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.usub_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.usub_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.usub_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.usub_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+define void @ssub_sat() {
+; CHECK-LABEL: 'ssub_sat'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.ssub_sat.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.ssub_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.ssub_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.ssub_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.ssub_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.ssub_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.ssub_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.ssub_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.ssub_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.ssub_sat.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.ssub_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.ssub_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.ssub_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %14 = call <16 x i16> @llvm.ssub_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.ssub_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.ssub_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.ssub_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.ssub_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.ssub_sat.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.ssub_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.ssub_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %22 = call <8 x i32> @llvm.ssub_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %23 = call <16 x i32> @llvm.ssub_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.ssub_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.ssub_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.ssub_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.ssub_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.ssub_sat.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.ssub_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <4 x i64> @llvm.ssub_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %31 = call <8 x i64> @llvm.ssub_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %32 = call <16 x i64> @llvm.ssub_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.ssub_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.ssub_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.ssub_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.ssub_sat.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.ssub_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.ssub_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.ssub_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.ssub_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.ssub_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.ssub_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.ssub_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.ssub_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.ssub_sat.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.ssub_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.ssub_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.ssub_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.ssub_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.ssub_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.ssub_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.ssub_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.ssub_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.ssub_sat.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.ssub_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.ssub_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.ssub_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.ssub_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.ssub_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.ssub_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.ssub_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.ssub_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.ssub_sat.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.ssub_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.ssub_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.ssub_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.ssub_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.ssub_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.ssub_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.ssub_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+define void @ushl_sat() {
+; CHECK-LABEL: 'ushl_sat'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.ushl_sat.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.ushl_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.ushl_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.ushl_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.ushl_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.ushl_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.ushl_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.ushl_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.ushl_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.ushl_sat.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.ushl_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.ushl_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.ushl_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %14 = call <16 x i16> @llvm.ushl_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.ushl_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.ushl_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.ushl_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.ushl_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.ushl_sat.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.ushl_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.ushl_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %22 = call <8 x i32> @llvm.ushl_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %23 = call <16 x i32> @llvm.ushl_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.ushl_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.ushl_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.ushl_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.ushl_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.ushl_sat.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.ushl_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <4 x i64> @llvm.ushl_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %31 = call <8 x i64> @llvm.ushl_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %32 = call <16 x i64> @llvm.ushl_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.ushl_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.ushl_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.ushl_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.ushl_sat.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.ushl_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.ushl_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.ushl_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.ushl_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.ushl_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.ushl_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.ushl_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.ushl_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.ushl_sat.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.ushl_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.ushl_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.ushl_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.ushl_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.ushl_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.ushl_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.ushl_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.ushl_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.ushl_sat.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.ushl_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.ushl_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.ushl_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.ushl_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.ushl_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.ushl_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.ushl_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.ushl_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.ushl_sat.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.ushl_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.ushl_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.ushl_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.ushl_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.ushl_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.ushl_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.ushl_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+define void @sshl_sat() {
+; CHECK-LABEL: 'sshl_sat'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.sshl_sat.i8(i8 undef, i8 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.sshl_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.sshl_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.sshl_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.sshl_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.sshl_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.sshl_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.sshl_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.sshl_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.sshl_sat.i16(i16 undef, i16 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.sshl_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.sshl_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.sshl_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %14 = call <16 x i16> @llvm.sshl_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.sshl_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.sshl_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.sshl_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.sshl_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.sshl_sat.i32(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.sshl_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.sshl_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %22 = call <8 x i32> @llvm.sshl_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %23 = call <16 x i32> @llvm.sshl_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.sshl_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.sshl_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.sshl_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.sshl_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.sshl_sat.i64(i64 undef, i64 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.sshl_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %30 = call <4 x i64> @llvm.sshl_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %31 = call <8 x i64> @llvm.sshl_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %32 = call <16 x i64> @llvm.sshl_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.sshl_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.sshl_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.sshl_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+ call i8 @llvm.sshl_sat.i8(i8 undef, i8 undef)
+ call <2 x i8> @llvm.sshl_sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+ call <4 x i8> @llvm.sshl_sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+ call <8 x i8> @llvm.sshl_sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+ call <16 x i8> @llvm.sshl_sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+ call <vscale x 2 x i8> @llvm.sshl_sat.nvx2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
+ call <vscale x 4 x i8> @llvm.sshl_sat.nvx4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
+ call <vscale x 8 x i8> @llvm.sshl_sat.nvx8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
+ call <vscale x 16 x i8> @llvm.sshl_sat.nvx16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
+ call i16 @llvm.sshl_sat.i16(i16 undef, i16 undef)
+ call <2 x i16> @llvm.sshl_sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+ call <4 x i16> @llvm.sshl_sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+ call <8 x i16> @llvm.sshl_sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+ call <16 x i16> @llvm.sshl_sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+ call <vscale x 2 x i16> @llvm.sshl_sat.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
+ call <vscale x 4 x i16> @llvm.sshl_sat.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
+ call <vscale x 8 x i16> @llvm.sshl_sat.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
+ call <vscale x 16 x i16> @llvm.sshl_sat.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
+ call i32 @llvm.sshl_sat.i32(i32 undef, i32 undef)
+ call <2 x i32> @llvm.sshl_sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+ call <4 x i32> @llvm.sshl_sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+ call <8 x i32> @llvm.sshl_sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+ call <16 x i32> @llvm.sshl_sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+ call <vscale x 2 x i32> @llvm.sshl_sat.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
+ call <vscale x 4 x i32> @llvm.sshl_sat.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
+ call <vscale x 8 x i32> @llvm.sshl_sat.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
+ call <vscale x 16 x i32> @llvm.sshl_sat.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
+ call i64 @llvm.sshl_sat.i64(i64 undef, i64 undef)
+ call <2 x i64> @llvm.sshl_sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+ call <4 x i64> @llvm.sshl_sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+ call <8 x i64> @llvm.sshl_sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+ call <16 x i64> @llvm.sshl_sat.v16i64(<16 x i64> undef, <16 x i64> undef)
+ call <vscale x 2 x i64> @llvm.sshl_sat.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
+ call <vscale x 4 x i64> @llvm.sshl_sat.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
+ call <vscale x 8 x i64> @llvm.sshl_sat.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
+ ret void
+}
+
+declare i8 @llvm.sadd_sat.i8(i8, i8)
+declare <2 x i8> @llvm.sadd_sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.sadd_sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.sadd_sat.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.sadd_sat.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.sadd_sat.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.sadd_sat.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.sadd_sat.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.sadd_sat.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.sadd_sat.i16(i16, i16)
+declare <2 x i16> @llvm.sadd_sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.sadd_sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.sadd_sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.sadd_sat.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.sadd_sat.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.sadd_sat.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.sadd_sat.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.sadd_sat.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.sadd_sat.i32(i32, i32)
+declare <2 x i32> @llvm.sadd_sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.sadd_sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.sadd_sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.sadd_sat.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.sadd_sat.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.sadd_sat.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.sadd_sat.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.sadd_sat.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.sadd_sat.i64(i64, i64)
+declare <2 x i64> @llvm.sadd_sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.sadd_sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.sadd_sat.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.sadd_sat.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.sadd_sat.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.sadd_sat.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.sadd_sat.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+declare i8 @llvm.uadd_sat.i8(i8, i8)
+declare <2 x i8> @llvm.uadd_sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.uadd_sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.uadd_sat.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.uadd_sat.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.uadd_sat.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.uadd_sat.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.uadd_sat.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.uadd_sat.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.uadd_sat.i16(i16, i16)
+declare <2 x i16> @llvm.uadd_sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.uadd_sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.uadd_sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.uadd_sat.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.uadd_sat.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.uadd_sat.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.uadd_sat.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.uadd_sat.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.uadd_sat.i32(i32, i32)
+declare <2 x i32> @llvm.uadd_sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.uadd_sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.uadd_sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.uadd_sat.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.uadd_sat.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.uadd_sat.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.uadd_sat.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.uadd_sat.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.uadd_sat.i64(i64, i64)
+declare <2 x i64> @llvm.uadd_sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.uadd_sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.uadd_sat.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.uadd_sat.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.uadd_sat.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.uadd_sat.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.uadd_sat.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+declare i8 @llvm.usub_sat.i8(i8, i8)
+declare <2 x i8> @llvm.usub_sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.usub_sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.usub_sat.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.usub_sat.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.usub_sat.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.usub_sat.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.usub_sat.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.usub_sat.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.usub_sat.i16(i16, i16)
+declare <2 x i16> @llvm.usub_sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.usub_sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.usub_sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.usub_sat.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.usub_sat.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.usub_sat.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.usub_sat.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.usub_sat.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.usub_sat.i32(i32, i32)
+declare <2 x i32> @llvm.usub_sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.usub_sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.usub_sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.usub_sat.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.usub_sat.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.usub_sat.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.usub_sat.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.usub_sat.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.usub_sat.i64(i64, i64)
+declare <2 x i64> @llvm.usub_sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.usub_sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.usub_sat.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.usub_sat.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.usub_sat.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.usub_sat.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.usub_sat.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+declare i8 @llvm.ssub_sat.i8(i8, i8)
+declare <2 x i8> @llvm.ssub_sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.ssub_sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.ssub_sat.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.ssub_sat.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.ssub_sat.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.ssub_sat.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.ssub_sat.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.ssub_sat.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.ssub_sat.i16(i16, i16)
+declare <2 x i16> @llvm.ssub_sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.ssub_sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.ssub_sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.ssub_sat.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.ssub_sat.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.ssub_sat.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.ssub_sat.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.ssub_sat.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.ssub_sat.i32(i32, i32)
+declare <2 x i32> @llvm.ssub_sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.ssub_sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.ssub_sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.ssub_sat.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.ssub_sat.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.ssub_sat.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.ssub_sat.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.ssub_sat.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.ssub_sat.i64(i64, i64)
+declare <2 x i64> @llvm.ssub_sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.ssub_sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.ssub_sat.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.ssub_sat.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.ssub_sat.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.ssub_sat.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.ssub_sat.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+declare i8 @llvm.ushl_sat.i8(i8, i8)
+declare <2 x i8> @llvm.ushl_sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.ushl_sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.ushl_sat.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.ushl_sat.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.ushl_sat.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.ushl_sat.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.ushl_sat.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.ushl_sat.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.ushl_sat.i16(i16, i16)
+declare <2 x i16> @llvm.ushl_sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.ushl_sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.ushl_sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.ushl_sat.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.ushl_sat.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.ushl_sat.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.ushl_sat.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.ushl_sat.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.ushl_sat.i32(i32, i32)
+declare <2 x i32> @llvm.ushl_sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.ushl_sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.ushl_sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.ushl_sat.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.ushl_sat.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.ushl_sat.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.ushl_sat.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.ushl_sat.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.ushl_sat.i64(i64, i64)
+declare <2 x i64> @llvm.ushl_sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.ushl_sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.ushl_sat.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.ushl_sat.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.ushl_sat.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.ushl_sat.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.ushl_sat.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+declare i8 @llvm.sshl_sat.i8(i8, i8)
+declare <2 x i8> @llvm.sshl_sat.v2i8(<2 x i8>, <2 x i8>)
+declare <4 x i8> @llvm.sshl_sat.v4i8(<4 x i8>, <4 x i8>)
+declare <8 x i8> @llvm.sshl_sat.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.sshl_sat.v16i8(<16 x i8>, <16 x i8>)
+declare <vscale x 2 x i8> @llvm.sshl_sat.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.sshl_sat.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.sshl_sat.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.sshl_sat.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.sshl_sat.i16(i16, i16)
+declare <2 x i16> @llvm.sshl_sat.v2i16(<2 x i16>, <2 x i16>)
+declare <4 x i16> @llvm.sshl_sat.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.sshl_sat.v8i16(<8 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.sshl_sat.v16i16(<16 x i16>, <16 x i16>)
+declare <vscale x 2 x i16> @llvm.sshl_sat.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.sshl_sat.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.sshl_sat.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.sshl_sat.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+declare i32 @llvm.sshl_sat.i32(i32, i32)
+declare <2 x i32> @llvm.sshl_sat.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.sshl_sat.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.sshl_sat.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.sshl_sat.v16i32(<16 x i32>, <16 x i32>)
+declare <vscale x 2 x i32> @llvm.sshl_sat.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.sshl_sat.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.sshl_sat.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.sshl_sat.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare i64 @llvm.sshl_sat.i64(i64, i64)
+declare <2 x i64> @llvm.sshl_sat.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.sshl_sat.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.sshl_sat.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.sshl_sat.v16i64(<16 x i64>, <16 x i64>)
+declare <vscale x 2 x i64> @llvm.sshl_sat.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.sshl_sat.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.sshl_sat.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/sqrt.ll b/llvm/test/Analysis/CostModel/RISCV/sqrt.ll
deleted file mode 100644
index e5c3ab554fa9f..0000000000000
--- a/llvm/test/Analysis/CostModel/RISCV/sqrt.ll
+++ /dev/null
@@ -1,64 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d -riscv-v-vector-bits-min=-1 | FileCheck %s
-
-define void @sqrt() {
-; CHECK-LABEL: 'sqrt'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.sqrt.f32(float undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x float> @llvm.sqrt.v2f32(<2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x float> @llvm.sqrt.v8f32(<8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x float> @llvm.sqrt.nxv2f32(<vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x float> @llvm.sqrt.nxv8f32(<vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x float> @llvm.sqrt.nxv16f32(<vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call double @llvm.sqrt.f64(double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x double> @llvm.sqrt.v2f64(<2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x double> @llvm.sqrt.v4f64(<4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <16 x double> @llvm.sqrt.v16f64(<16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 1 x double> @llvm.sqrt.nxv1f64(<vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 4 x double> @llvm.sqrt.nxv4f64(<vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 8 x double> @llvm.sqrt.nxv8f64(<vscale x 8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- call float @llvm.sqrt.f32(float undef)
- call <2 x float> @llvm.sqrt.v2f32(<2 x float> undef)
- call <4 x float> @llvm.sqrt.v4f32(<4 x float> undef)
- call <8 x float> @llvm.sqrt.v8f32(<8 x float> undef)
- call <16 x float> @llvm.sqrt.v16f32(<16 x float> undef)
- call <vscale x 2 x float> @llvm.sqrt.nvx2f32(<vscale x 2 x float> undef)
- call <vscale x 4 x float> @llvm.sqrt.nvx4f32(<vscale x 4 x float> undef)
- call <vscale x 8 x float> @llvm.sqrt.nvx8f32(<vscale x 8 x float> undef)
- call <vscale x 16 x float> @llvm.sqrt.nvx16f32(<vscale x 16 x float> undef)
- call double @llvm.sqrt.f64(double undef)
- call <2 x double> @llvm.sqrt.v2f64(<2 x double> undef)
- call <4 x double> @llvm.sqrt.v4f64(<4 x double> undef)
- call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
- call <16 x double> @llvm.sqrt.v16f64(<16 x double> undef)
- call <vscale x 1 x double> @llvm.sqrt.nvx1f64(<vscale x 1 x double> undef)
- call <vscale x 2 x double> @llvm.sqrt.nvx2f64(<vscale x 2 x double> undef)
- call <vscale x 4 x double> @llvm.sqrt.nvx4f64(<vscale x 4 x double> undef)
- call <vscale x 8 x double> @llvm.sqrt.nvx8f64(<vscale x 8 x double> undef)
- ret void
-}
-
-declare float @llvm.sqrt.f32(float)
-declare <2 x float> @llvm.sqrt.v2f32(<2 x float>)
-declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
-declare <8 x float> @llvm.sqrt.v8f32(<8 x float>)
-declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
-declare <vscale x 2 x float> @llvm.sqrt.nvx2f32(<vscale x 2 x float>)
-declare <vscale x 4 x float> @llvm.sqrt.nvx4f32(<vscale x 4 x float>)
-declare <vscale x 8 x float> @llvm.sqrt.nvx8f32(<vscale x 8 x float>)
-declare <vscale x 16 x float> @llvm.sqrt.nvx16f32(<vscale x 16 x float>)
-declare double @llvm.sqrt.f64(double)
-declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)
-declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
-declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
-declare <16 x double> @llvm.sqrt.v16f64(<16 x double>)
-declare <vscale x 1 x double> @llvm.sqrt.nvx1f64(<vscale x 1 x double>)
-declare <vscale x 2 x double> @llvm.sqrt.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.sqrt.nvx4f64(<vscale x 4 x double>)
-declare <vscale x 8 x double> @llvm.sqrt.nvx8f64(<vscale x 8 x double>)
More information about the llvm-commits
mailing list