[llvm] cb3f32a - [RISCV] Add cost model coverage for integer bitmanip intrinsics

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 24 15:11:41 PDT 2022


Author: Philip Reames
Date: 2022-08-24T15:09:21-07:00
New Revision: cb3f32a20d7091900a115ded426a8ae493dad4df

URL: https://github.com/llvm/llvm-project/commit/cb3f32a20d7091900a115ded426a8ae493dad4df
DIFF: https://github.com/llvm/llvm-project/commit/cb3f32a20d7091900a115ded426a8ae493dad4df.diff

LOG: [RISCV] Add cost model coverage for integer bitmanip intrinsics

Added: 
    llvm/test/Analysis/CostModel/RISCV/integer-bit-manip.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/CostModel/RISCV/integer-bit-manip.ll b/llvm/test/Analysis/CostModel/RISCV/integer-bit-manip.ll
new file mode 100644
index 000000000000..decf95973e28
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/integer-bit-manip.ll
@@ -0,0 +1,555 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d -riscv-v-vector-bits-min=-1 | FileCheck %s
+
+define void @bswap() {
+; CHECK-LABEL: 'bswap'
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %1 = call i16 @llvm.bswap.i16(i16 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %2 = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %3 = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %4 = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %5 = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i16> @llvm.bswap.nxv2i16(<vscale x 2 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i16> @llvm.bswap.nxv4i16(<vscale x 4 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i16> @llvm.bswap.nxv16i16(<vscale x 16 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %10 = call i32 @llvm.bswap.i32(i32 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %11 = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %12 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %13 = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %14 = call <16 x i32> @llvm.bswap.v16i32(<16 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i32> @llvm.bswap.nxv2i32(<vscale x 2 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i32> @llvm.bswap.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i32> @llvm.bswap.nxv16i32(<vscale x 16 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %19 = call i64 @llvm.bswap.i64(i64 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %20 = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %21 = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %22 = call <8 x i64> @llvm.bswap.v8i64(<8 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %23 = call <16 x i64> @llvm.bswap.v16i64(<16 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i64> @llvm.bswap.nxv4i64(<vscale x 4 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i64> @llvm.bswap.nxv8i64(<vscale x 8 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i64> @llvm.bswap.nxv16i64(<vscale x 16 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+  call i16 @llvm.bswap.i16(i16 undef)
+  call <2 x i16> @llvm.bswap.v2i16(<2 x i16> undef)
+  call <4 x i16> @llvm.bswap.v4i16(<4 x i16> undef)
+  call <8 x i16> @llvm.bswap.v8i16(<8 x i16> undef)
+  call <16 x i16> @llvm.bswap.v16i16(<16 x i16> undef)
+  call <vscale x 2 x i16> @llvm.bswap.nxv2i16(<vscale x 2 x i16> undef)
+  call <vscale x 4 x i16> @llvm.bswap.nxv4i16(<vscale x 4 x i16> undef)
+  call <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16> undef)
+  call <vscale x 16 x i16> @llvm.bswap.nxv16i16(<vscale x 16 x i16> undef)
+  call i32 @llvm.bswap.i32(i32 undef)
+  call <2 x i32> @llvm.bswap.v2i32(<2 x i32> undef)
+  call <4 x i32> @llvm.bswap.v4i32(<4 x i32> undef)
+  call <8 x i32> @llvm.bswap.v8i32(<8 x i32> undef)
+  call <16 x i32> @llvm.bswap.v16i32(<16 x i32> undef)
+  call <vscale x 2 x i32> @llvm.bswap.nxv2i32(<vscale x 2 x i32> undef)
+  call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> undef)
+  call <vscale x 8 x i32> @llvm.bswap.nxv8i32(<vscale x 8 x i32> undef)
+  call <vscale x 16 x i32> @llvm.bswap.nxv16i32(<vscale x 16 x i32> undef)
+  call i64 @llvm.bswap.i64(i64 undef)
+  call <2 x i64> @llvm.bswap.v2i64(<2 x i64> undef)
+  call <4 x i64> @llvm.bswap.v4i64(<4 x i64> undef)
+  call <8 x i64> @llvm.bswap.v8i64(<8 x i64> undef)
+  call <16 x i64> @llvm.bswap.v16i64(<16 x i64> undef)
+  call <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64> undef)
+  call <vscale x 4 x i64> @llvm.bswap.nxv4i64(<vscale x 4 x i64> undef)
+  call <vscale x 8 x i64> @llvm.bswap.nxv8i64(<vscale x 8 x i64> undef)
+  call <vscale x 16 x i64> @llvm.bswap.nxv16i64(<vscale x 16 x i64> undef)
+  ret void
+}
+
+define void @bitreverse() {
+; CHECK-LABEL: 'bitreverse'
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %1 = call i8 @llvm.bitreverse.i8(i8 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %2 = call <2 x i8> @llvm.bitreverse.v2i8(<2 x i8> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %3 = call <4 x i8> @llvm.bitreverse.v4i8(<4 x i8> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %4 = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %5 = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.bitreverse.nxv2i8(<vscale x 2 x i8> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.bitreverse.nxv4i8(<vscale x 4 x i8> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.bitreverse.nxv8i8(<vscale x 8 x i8> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %10 = call i16 @llvm.bitreverse.i16(i16 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %11 = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %12 = call <4 x i16> @llvm.bitreverse.v4i16(<4 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %13 = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %14 = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.bitreverse.nxv2i16(<vscale x 2 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.bitreverse.nxv4i16(<vscale x 4 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.bitreverse.nxv16i16(<vscale x 16 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %19 = call i32 @llvm.bitreverse.i32(i32 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %20 = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %21 = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %22 = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %23 = call <16 x i32> @llvm.bitreverse.v16i32(<16 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.bitreverse.nxv2i32(<vscale x 2 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.bitreverse.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.bitreverse.nxv16i32(<vscale x 16 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %28 = call i64 @llvm.bitreverse.i64(i64 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %29 = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %30 = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %31 = call <8 x i64> @llvm.bitreverse.v8i64(<8 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %32 = call <16 x i64> @llvm.bitreverse.v16i64(<16 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.bitreverse.nxv4i64(<vscale x 4 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.bitreverse.nxv8i64(<vscale x 8 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %36 = call <vscale x 16 x i64> @llvm.bitreverse.nxv16i64(<vscale x 16 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+  call i8 @llvm.bitreverse.i8(i8 undef)
+  call <2 x i8> @llvm.bitreverse.v2i8(<2 x i8> undef)
+  call <4 x i8> @llvm.bitreverse.v4i8(<4 x i8> undef)
+  call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> undef)
+  call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> undef)
+  call <vscale x 2 x i8> @llvm.bitreverse.nxv2i8(<vscale x 2 x i8> undef)
+  call <vscale x 4 x i8> @llvm.bitreverse.nxv4i8(<vscale x 4 x i8> undef)
+  call <vscale x 8 x i8> @llvm.bitreverse.nxv8i8(<vscale x 8 x i8> undef)
+  call <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8> undef)
+  call i16 @llvm.bitreverse.i16(i16 undef)
+  call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> undef)
+  call <4 x i16> @llvm.bitreverse.v4i16(<4 x i16> undef)
+  call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> undef)
+  call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> undef)
+  call <vscale x 2 x i16> @llvm.bitreverse.nxv2i16(<vscale x 2 x i16> undef)
+  call <vscale x 4 x i16> @llvm.bitreverse.nxv4i16(<vscale x 4 x i16> undef)
+  call <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16> undef)
+  call <vscale x 16 x i16> @llvm.bitreverse.nxv16i16(<vscale x 16 x i16> undef)
+  call i32 @llvm.bitreverse.i32(i32 undef)
+  call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> undef)
+  call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> undef)
+  call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> undef)
+  call <16 x i32> @llvm.bitreverse.v16i32(<16 x i32> undef)
+  call <vscale x 2 x i32> @llvm.bitreverse.nxv2i32(<vscale x 2 x i32> undef)
+  call <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32> undef)
+  call <vscale x 8 x i32> @llvm.bitreverse.nxv8i32(<vscale x 8 x i32> undef)
+  call <vscale x 16 x i32> @llvm.bitreverse.nxv16i32(<vscale x 16 x i32> undef)
+  call i64 @llvm.bitreverse.i64(i64 undef)
+  call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> undef)
+  call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> undef)
+  call <8 x i64> @llvm.bitreverse.v8i64(<8 x i64> undef)
+  call <16 x i64> @llvm.bitreverse.v16i64(<16 x i64> undef)
+  call <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64> undef)
+  call <vscale x 4 x i64> @llvm.bitreverse.nxv4i64(<vscale x 4 x i64> undef)
+  call <vscale x 8 x i64> @llvm.bitreverse.nxv8i64(<vscale x 8 x i64> undef)
+  call <vscale x 16 x i64> @llvm.bitreverse.nxv16i64(<vscale x 16 x i64> undef)
+  ret void
+}
+
+define void @ctpop() {
+; CHECK-LABEL: 'ctpop'
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %1 = call i8 @llvm.ctpop.i8(i8 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %2 = call <2 x i8> @llvm.ctpop.v2i8(<2 x i8> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 20 for instruction: %3 = call <4 x i8> @llvm.ctpop.v4i8(<4 x i8> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 40 for instruction: %4 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 80 for instruction: %5 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.ctpop.nxv2i8(<vscale x 2 x i8> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.ctpop.nxv4i8(<vscale x 4 x i8> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.ctpop.nxv8i8(<vscale x 8 x i8> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.ctpop.nxv16i8(<vscale x 16 x i8> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %10 = call i16 @llvm.ctpop.i16(i16 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %11 = call <2 x i16> @llvm.ctpop.v2i16(<2 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 20 for instruction: %12 = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 40 for instruction: %13 = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 80 for instruction: %14 = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.ctpop.nxv2i16(<vscale x 2 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.ctpop.nxv4i16(<vscale x 4 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.ctpop.nxv8i16(<vscale x 8 x i16> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.ctpop.nxv16i16(<vscale x 16 x i16> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %19 = call i32 @llvm.ctpop.i32(i32 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %20 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 20 for instruction: %21 = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 40 for instruction: %22 = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 80 for instruction: %23 = call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.ctpop.nxv2i32(<vscale x 2 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.ctpop.nxv4i32(<vscale x 4 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.ctpop.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %28 = call i64 @llvm.ctpop.i64(i64 undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %29 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 20 for instruction: %30 = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 40 for instruction: %31 = call <8 x i64> @llvm.ctpop.v8i64(<8 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 80 for instruction: %32 = call <16 x i64> @llvm.ctpop.v16i64(<16 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.ctpop.nxv2i64(<vscale x 2 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.ctpop.nxv4i64(<vscale x 4 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> undef)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %36 = call <vscale x 16 x i64> @llvm.ctpop.nxv16i64(<vscale x 16 x i64> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+  call i8 @llvm.ctpop.i8(i8 undef)
+  call <2 x i8> @llvm.ctpop.v2i8(<2 x i8> undef)
+  call <4 x i8> @llvm.ctpop.v4i8(<4 x i8> undef)
+  call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> undef)
+  call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> undef)
+  call <vscale x 2 x i8> @llvm.ctpop.nxv2i8(<vscale x 2 x i8> undef)
+  call <vscale x 4 x i8> @llvm.ctpop.nxv4i8(<vscale x 4 x i8> undef)
+  call <vscale x 8 x i8> @llvm.ctpop.nxv8i8(<vscale x 8 x i8> undef)
+  call <vscale x 16 x i8> @llvm.ctpop.nxv16i8(<vscale x 16 x i8> undef)
+  call i16 @llvm.ctpop.i16(i16 undef)
+  call <2 x i16> @llvm.ctpop.v2i16(<2 x i16> undef)
+  call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> undef)
+  call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> undef)
+  call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> undef)
+  call <vscale x 2 x i16> @llvm.ctpop.nxv2i16(<vscale x 2 x i16> undef)
+  call <vscale x 4 x i16> @llvm.ctpop.nxv4i16(<vscale x 4 x i16> undef)
+  call <vscale x 8 x i16> @llvm.ctpop.nxv8i16(<vscale x 8 x i16> undef)
+  call <vscale x 16 x i16> @llvm.ctpop.nxv16i16(<vscale x 16 x i16> undef)
+  call i32 @llvm.ctpop.i32(i32 undef)
+  call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> undef)
+  call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> undef)
+  call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> undef)
+  call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> undef)
+  call <vscale x 2 x i32> @llvm.ctpop.nxv2i32(<vscale x 2 x i32> undef)
+  call <vscale x 4 x i32> @llvm.ctpop.nxv4i32(<vscale x 4 x i32> undef)
+  call <vscale x 8 x i32> @llvm.ctpop.nxv8i32(<vscale x 8 x i32> undef)
+  call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> undef)
+  call i64 @llvm.ctpop.i64(i64 undef)
+  call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> undef)
+  call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> undef)
+  call <8 x i64> @llvm.ctpop.v8i64(<8 x i64> undef)
+  call <16 x i64> @llvm.ctpop.v16i64(<16 x i64> undef)
+  call <vscale x 2 x i64> @llvm.ctpop.nxv2i64(<vscale x 2 x i64> undef)
+  call <vscale x 4 x i64> @llvm.ctpop.nxv4i64(<vscale x 4 x i64> undef)
+  call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> undef)
+  call <vscale x 16 x i64> @llvm.ctpop.nxv16i64(<vscale x 16 x i64> undef)
+  ret void
+}
+
+define void @ctlz() {
+; CHECK-LABEL: 'ctlz'
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %1 = call i8 @llvm.ctlz.i8(i8 undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %2 = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %3 = call <4 x i8> @llvm.ctlz.v4i8(<4 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %4 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %5 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.ctlz.nxv2i8(<vscale x 2 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.ctlz.nxv4i8(<vscale x 4 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.ctlz.nxv8i8(<vscale x 8 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %10 = call i16 @llvm.ctlz.i16(i16 undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %11 = call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %12 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %13 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %14 = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.ctlz.nxv4i16(<vscale x 4 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.ctlz.nxv16i16(<vscale x 16 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %19 = call i32 @llvm.ctlz.i32(i32 undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %20 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %21 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %22 = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %23 = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.ctlz.nxv8i32(<vscale x 8 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.ctlz.nxv16i32(<vscale x 16 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %28 = call i64 @llvm.ctlz.i64(i64 undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %29 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %30 = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %31 = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %32 = call <16 x i64> @llvm.ctlz.v16i64(<16 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.ctlz.nxv4i64(<vscale x 4 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.ctlz.nxv8i64(<vscale x 8 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %36 = call <vscale x 16 x i64> @llvm.ctlz.nxv16i64(<vscale x 16 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+  call i8 @llvm.ctlz.i8(i8 undef, i1 false)
+  call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> undef, i1 false)
+  call <4 x i8> @llvm.ctlz.v4i8(<4 x i8> undef, i1 false)
+  call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> undef, i1 false)
+  call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> undef, i1 false)
+  call <vscale x 2 x i8> @llvm.ctlz.nxv2i8(<vscale x 2 x i8> undef, i1 false)
+  call <vscale x 4 x i8> @llvm.ctlz.nxv4i8(<vscale x 4 x i8> undef, i1 false)
+  call <vscale x 8 x i8> @llvm.ctlz.nxv8i8(<vscale x 8 x i8> undef, i1 false)
+  call <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8> undef, i1 false)
+  call i16 @llvm.ctlz.i16(i16 undef, i1 false)
+  call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> undef, i1 false)
+  call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> undef, i1 false)
+  call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> undef, i1 false)
+  call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> undef, i1 false)
+  call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> undef, i1 false)
+  call <vscale x 4 x i16> @llvm.ctlz.nxv4i16(<vscale x 4 x i16> undef, i1 false)
+  call <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16> undef, i1 false)
+  call <vscale x 16 x i16> @llvm.ctlz.nxv16i16(<vscale x 16 x i16> undef, i1 false)
+  call i32 @llvm.ctlz.i32(i32 undef, i1 false)
+  call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> undef, i1 false)
+  call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> undef, i1 false)
+  call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> undef, i1 false)
+  call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> undef, i1 false)
+  call <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32> undef, i1 false)
+  call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> undef, i1 false)
+  call <vscale x 8 x i32> @llvm.ctlz.nxv8i32(<vscale x 8 x i32> undef, i1 false)
+  call <vscale x 16 x i32> @llvm.ctlz.nxv16i32(<vscale x 16 x i32> undef, i1 false)
+  call i64 @llvm.ctlz.i64(i64 undef, i1 false)
+  call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> undef, i1 false)
+  call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> undef, i1 false)
+  call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> undef, i1 false)
+  call <16 x i64> @llvm.ctlz.v16i64(<16 x i64> undef, i1 false)
+  call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> undef, i1 false)
+  call <vscale x 4 x i64> @llvm.ctlz.nxv4i64(<vscale x 4 x i64> undef, i1 false)
+  call <vscale x 8 x i64> @llvm.ctlz.nxv8i64(<vscale x 8 x i64> undef, i1 false)
+  call <vscale x 16 x i64> @llvm.ctlz.nxv16i64(<vscale x 16 x i64> undef, i1 false)
+  ret void
+}
+
+define void @cttz() {
+; CHECK-LABEL: 'cttz'
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %1 = call i8 @llvm.cttz.i8(i8 undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %2 = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %3 = call <4 x i8> @llvm.cttz.v4i8(<4 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %4 = call <8 x i8> @llvm.cttz.v8i8(<8 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %5 = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.cttz.nxv2i8(<vscale x 2 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.cttz.nxv4i8(<vscale x 4 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.cttz.nxv8i8(<vscale x 8 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.cttz.nxv16i8(<vscale x 16 x i8> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %10 = call i16 @llvm.cttz.i16(i16 undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %11 = call <2 x i16> @llvm.cttz.v2i16(<2 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %12 = call <4 x i16> @llvm.cttz.v4i16(<4 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %13 = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %14 = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.cttz.nxv2i16(<vscale x 2 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.cttz.nxv4i16(<vscale x 4 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.cttz.nxv8i16(<vscale x 8 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.cttz.nxv16i16(<vscale x 16 x i16> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %19 = call i32 @llvm.cttz.i32(i32 undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %20 = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %21 = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %22 = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %23 = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.cttz.nxv2i32(<vscale x 2 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.cttz.nxv8i32(<vscale x 8 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.cttz.nxv16i32(<vscale x 16 x i32> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %28 = call i64 @llvm.cttz.i64(i64 undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %29 = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %30 = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 88 for instruction: %31 = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 176 for instruction: %32 = call <16 x i64> @llvm.cttz.v16i64(<16 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.cttz.nxv2i64(<vscale x 2 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.cttz.nxv4i64(<vscale x 4 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.cttz.nxv8i64(<vscale x 8 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %36 = call <vscale x 16 x i64> @llvm.cttz.nxv16i64(<vscale x 16 x i64> undef, i1 false)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+  call i8 @llvm.cttz.i8(i8 undef, i1 false)
+  call <2 x i8> @llvm.cttz.v2i8(<2 x i8> undef, i1 false)
+  call <4 x i8> @llvm.cttz.v4i8(<4 x i8> undef, i1 false)
+  call <8 x i8> @llvm.cttz.v8i8(<8 x i8> undef, i1 false)
+  call <16 x i8> @llvm.cttz.v16i8(<16 x i8> undef, i1 false)
+  call <vscale x 2 x i8> @llvm.cttz.nxv2i8(<vscale x 2 x i8> undef, i1 false)
+  call <vscale x 4 x i8> @llvm.cttz.nxv4i8(<vscale x 4 x i8> undef, i1 false)
+  call <vscale x 8 x i8> @llvm.cttz.nxv8i8(<vscale x 8 x i8> undef, i1 false)
+  call <vscale x 16 x i8> @llvm.cttz.nxv16i8(<vscale x 16 x i8> undef, i1 false)
+  call i16 @llvm.cttz.i16(i16 undef, i1 false)
+  call <2 x i16> @llvm.cttz.v2i16(<2 x i16> undef, i1 false)
+  call <4 x i16> @llvm.cttz.v4i16(<4 x i16> undef, i1 false)
+  call <8 x i16> @llvm.cttz.v8i16(<8 x i16> undef, i1 false)
+  call <16 x i16> @llvm.cttz.v16i16(<16 x i16> undef, i1 false)
+  call <vscale x 2 x i16> @llvm.cttz.nxv2i16(<vscale x 2 x i16> undef, i1 false)
+  call <vscale x 4 x i16> @llvm.cttz.nxv4i16(<vscale x 4 x i16> undef, i1 false)
+  call <vscale x 8 x i16> @llvm.cttz.nxv8i16(<vscale x 8 x i16> undef, i1 false)
+  call <vscale x 16 x i16> @llvm.cttz.nxv16i16(<vscale x 16 x i16> undef, i1 false)
+  call i32 @llvm.cttz.i32(i32 undef, i1 false)
+  call <2 x i32> @llvm.cttz.v2i32(<2 x i32> undef, i1 false)
+  call <4 x i32> @llvm.cttz.v4i32(<4 x i32> undef, i1 false)
+  call <8 x i32> @llvm.cttz.v8i32(<8 x i32> undef, i1 false)
+  call <16 x i32> @llvm.cttz.v16i32(<16 x i32> undef, i1 false)
+  call <vscale x 2 x i32> @llvm.cttz.nxv2i32(<vscale x 2 x i32> undef, i1 false)
+  call <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32> undef, i1 false)
+  call <vscale x 8 x i32> @llvm.cttz.nxv8i32(<vscale x 8 x i32> undef, i1 false)
+  call <vscale x 16 x i32> @llvm.cttz.nxv16i32(<vscale x 16 x i32> undef, i1 false)
+  call i64 @llvm.cttz.i64(i64 undef, i1 false)
+  call <2 x i64> @llvm.cttz.v2i64(<2 x i64> undef, i1 false)
+  call <4 x i64> @llvm.cttz.v4i64(<4 x i64> undef, i1 false)
+  call <8 x i64> @llvm.cttz.v8i64(<8 x i64> undef, i1 false)
+  call <16 x i64> @llvm.cttz.v16i64(<16 x i64> undef, i1 false)
+  call <vscale x 2 x i64> @llvm.cttz.nxv2i64(<vscale x 2 x i64> undef, i1 false)
+  call <vscale x 4 x i64> @llvm.cttz.nxv4i64(<vscale x 4 x i64> undef, i1 false)
+  call <vscale x 8 x i64> @llvm.cttz.nxv8i64(<vscale x 8 x i64> undef, i1 false)
+  call <vscale x 16 x i64> @llvm.cttz.nxv16i64(<vscale x 16 x i64> undef, i1 false)
+  ret void
+}
+
+declare i16 @llvm.bswap.i16(i16)
+declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>)
+declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
+declare <vscale x 2 x i16> @llvm.bswap.nxv2i16(<vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.bswap.nxv4i16(<vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.bswap.nxv16i16(<vscale x 16 x i16>)
+declare i32 @llvm.bswap.i32(i32)
+declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>)
+declare <16 x i32> @llvm.bswap.v16i32(<16 x i32>)
+declare <vscale x 2 x i32> @llvm.bswap.nxv2i32(<vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.bswap.nxv8i32(<vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.bswap.nxv16i32(<vscale x 16 x i32>)
+declare i64 @llvm.bswap.i64(i64)
+declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>)
+declare <8 x i64> @llvm.bswap.v8i64(<8 x i64>)
+declare <16 x i64> @llvm.bswap.v16i64(<16 x i64>)
+declare <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.bswap.nxv4i64(<vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.bswap.nxv8i64(<vscale x 8 x i64>)
+declare <vscale x 16 x i64> @llvm.bswap.nxv16i64(<vscale x 16 x i64>)
+
+declare i8 @llvm.bitreverse.i8(i8)
+declare <2 x i8> @llvm.bitreverse.v2i8(<2 x i8>)
+declare <4 x i8> @llvm.bitreverse.v4i8(<4 x i8>)
+declare <8 x i8> @llvm.bitreverse.v8i8(<8 x i8>)
+declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>)
+declare <vscale x 2 x i8> @llvm.bitreverse.nxv2i8(<vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.bitreverse.nxv4i8(<vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.bitreverse.nxv8i8(<vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8>)
+declare i16 @llvm.bitreverse.i16(i16)
+declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>)
+declare <4 x i16> @llvm.bitreverse.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>)
+declare <vscale x 2 x i16> @llvm.bitreverse.nxv2i16(<vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.bitreverse.nxv4i16(<vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.bitreverse.nxv16i16(<vscale x 16 x i16>)
+declare i32 @llvm.bitreverse.i32(i32)
+declare <2 x i32> @llvm.bitreverse.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>)
+declare <16 x i32> @llvm.bitreverse.v16i32(<16 x i32>)
+declare <vscale x 2 x i32> @llvm.bitreverse.nxv2i32(<vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.bitreverse.nxv8i32(<vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.bitreverse.nxv16i32(<vscale x 16 x i32>)
+declare i64 @llvm.bitreverse.i64(i64)
+declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>)
+declare <8 x i64> @llvm.bitreverse.v8i64(<8 x i64>)
+declare <16 x i64> @llvm.bitreverse.v16i64(<16 x i64>)
+declare <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.bitreverse.nxv4i64(<vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.bitreverse.nxv8i64(<vscale x 8 x i64>)
+declare <vscale x 16 x i64> @llvm.bitreverse.nxv16i64(<vscale x 16 x i64>)
+
+declare i8 @llvm.ctpop.i8(i8)
+declare <2 x i8> @llvm.ctpop.v2i8(<2 x i8>)
+declare <4 x i8> @llvm.ctpop.v4i8(<4 x i8>)
+declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>)
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)
+declare <vscale x 2 x i8> @llvm.ctpop.nxv2i8(<vscale x 2 x i8>)
+declare <vscale x 4 x i8> @llvm.ctpop.nxv4i8(<vscale x 4 x i8>)
+declare <vscale x 8 x i8> @llvm.ctpop.nxv8i8(<vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.ctpop.nxv16i8(<vscale x 16 x i8>)
+declare i16 @llvm.ctpop.i16(i16)
+declare <2 x i16> @llvm.ctpop.v2i16(<2 x i16>)
+declare <4 x i16> @llvm.ctpop.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
+declare <vscale x 2 x i16> @llvm.ctpop.nxv2i16(<vscale x 2 x i16>)
+declare <vscale x 4 x i16> @llvm.ctpop.nxv4i16(<vscale x 4 x i16>)
+declare <vscale x 8 x i16> @llvm.ctpop.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 16 x i16> @llvm.ctpop.nxv16i16(<vscale x 16 x i16>)
+declare i32 @llvm.ctpop.i32(i32)
+declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
+declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>)
+declare <vscale x 2 x i32> @llvm.ctpop.nxv2i32(<vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.ctpop.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 8 x i32> @llvm.ctpop.nxv8i32(<vscale x 8 x i32>)
+declare <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32>)
+declare i64 @llvm.ctpop.i64(i64)
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
+declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>)
+declare <16 x i64> @llvm.ctpop.v16i64(<16 x i64>)
+declare <vscale x 2 x i64> @llvm.ctpop.nxv2i64(<vscale x 2 x i64>)
+declare <vscale x 4 x i64> @llvm.ctpop.nxv4i64(<vscale x 4 x i64>)
+declare <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64>)
+declare <vscale x 16 x i64> @llvm.ctpop.nxv16i64(<vscale x 16 x i64>)
+
+declare i8 @llvm.ctlz.i8(i8, i1)
+declare <2 x i8> @llvm.ctlz.v2i8(<2 x i8>, i1)
+declare <4 x i8> @llvm.ctlz.v4i8(<4 x i8>, i1)
+declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>, i1)
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)
+declare <vscale x 2 x i8> @llvm.ctlz.nxv2i8(<vscale x 2 x i8>, i1)
+declare <vscale x 4 x i8> @llvm.ctlz.nxv4i8(<vscale x 4 x i8>, i1)
+declare <vscale x 8 x i8> @llvm.ctlz.nxv8i8(<vscale x 8 x i8>, i1)
+declare <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8>, i1)
+declare i16 @llvm.ctlz.i16(i16, i1)
+declare <2 x i16> @llvm.ctlz.v2i16(<2 x i16>, i1)
+declare <4 x i16> @llvm.ctlz.v4i16(<4 x i16>, i1)
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1)
+declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1)
+declare <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16>, i1)
+declare <vscale x 4 x i16> @llvm.ctlz.nxv4i16(<vscale x 4 x i16>, i1)
+declare <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16>, i1)
+declare <vscale x 16 x i16> @llvm.ctlz.nxv16i16(<vscale x 16 x i16>, i1)
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1)
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
+declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1)
+declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)
+declare <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32>, i1)
+declare <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32>, i1)
+declare <vscale x 8 x i32> @llvm.ctlz.nxv8i32(<vscale x 8 x i32>, i1)
+declare <vscale x 16 x i32> @llvm.ctlz.nxv16i32(<vscale x 16 x i32>, i1)
+declare i64 @llvm.ctlz.i64(i64, i1)
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
+declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1)
+declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1)
+declare <16 x i64> @llvm.ctlz.v16i64(<16 x i64>, i1)
+declare <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64>, i1)
+declare <vscale x 4 x i64> @llvm.ctlz.nxv4i64(<vscale x 4 x i64>, i1)
+declare <vscale x 8 x i64> @llvm.ctlz.nxv8i64(<vscale x 8 x i64>, i1)
+declare <vscale x 16 x i64> @llvm.ctlz.nxv16i64(<vscale x 16 x i64>, i1)
+
+declare i8 @llvm.cttz.i8(i8, i1)
+declare <2 x i8> @llvm.cttz.v2i8(<2 x i8>, i1)
+declare <4 x i8> @llvm.cttz.v4i8(<4 x i8>, i1)
+declare <8 x i8> @llvm.cttz.v8i8(<8 x i8>, i1)
+declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>, i1)
+declare <vscale x 2 x i8> @llvm.cttz.nxv2i8(<vscale x 2 x i8>, i1)
+declare <vscale x 4 x i8> @llvm.cttz.nxv4i8(<vscale x 4 x i8>, i1)
+declare <vscale x 8 x i8> @llvm.cttz.nxv8i8(<vscale x 8 x i8>, i1)
+declare <vscale x 16 x i8> @llvm.cttz.nxv16i8(<vscale x 16 x i8>, i1)
+declare i16 @llvm.cttz.i16(i16, i1)
+declare <2 x i16> @llvm.cttz.v2i16(<2 x i16>, i1)
+declare <4 x i16> @llvm.cttz.v4i16(<4 x i16>, i1)
+declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>, i1)
+declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>, i1)
+declare <vscale x 2 x i16> @llvm.cttz.nxv2i16(<vscale x 2 x i16>, i1)
+declare <vscale x 4 x i16> @llvm.cttz.nxv4i16(<vscale x 4 x i16>, i1)
+declare <vscale x 8 x i16> @llvm.cttz.nxv8i16(<vscale x 8 x i16>, i1)
+declare <vscale x 16 x i16> @llvm.cttz.nxv16i16(<vscale x 16 x i16>, i1)
+declare i32 @llvm.cttz.i32(i32, i1)
+declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>, i1)
+declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1)
+declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1)
+declare <16 x i32> @llvm.cttz.v16i32(<16 x i32>, i1)
+declare <vscale x 2 x i32> @llvm.cttz.nxv2i32(<vscale x 2 x i32>, i1)
+declare <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32>, i1)
+declare <vscale x 8 x i32> @llvm.cttz.nxv8i32(<vscale x 8 x i32>, i1)
+declare <vscale x 16 x i32> @llvm.cttz.nxv16i32(<vscale x 16 x i32>, i1)
+declare i64 @llvm.cttz.i64(i64, i1)
+declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
+declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>, i1)
+declare <8 x i64> @llvm.cttz.v8i64(<8 x i64>, i1)
+declare <16 x i64> @llvm.cttz.v16i64(<16 x i64>, i1)
+declare <vscale x 2 x i64> @llvm.cttz.nxv2i64(<vscale x 2 x i64>, i1)
+declare <vscale x 4 x i64> @llvm.cttz.nxv4i64(<vscale x 4 x i64>, i1)
+declare <vscale x 8 x i64> @llvm.cttz.nxv8i64(<vscale x 8 x i64>, i1)
+declare <vscale x 16 x i64> @llvm.cttz.nxv16i64(<vscale x 16 x i64>, i1)

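To reproduce or extend this coverage locally, the usual cost-model test workflow applies. A minimal sketch, assuming an LLVM build tree at ./build with opt and llvm-lit already built (the paths below are illustrative, not part of the commit):

  # Run just this test through lit.
  ./build/bin/llvm-lit -v llvm/test/Analysis/CostModel/RISCV/integer-bit-manip.ll

  # Regenerate the autogenerated CHECK lines after adding more intrinsic calls.
  python3 llvm/utils/update_analyze_test_checks.py \
      --opt-binary=./build/bin/opt \
      llvm/test/Analysis/CostModel/RISCV/integer-bit-manip.ll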