[clang] 95c0125 - [Clang][RISCV] Add rvv vsetvl and vsetvlmax intrinsic functions.
Zakk Chen via cfe-commits
cfe-commits at lists.llvm.org
Wed Mar 17 20:26:20 PDT 2021
Author: Zakk Chen
Date: 2021-03-17T20:26:06-07:00
New Revision: 95c0125f2bc610d9c51d4fbdd1144fcab40f3b51
URL: https://github.com/llvm/llvm-project/commit/95c0125f2bc610d9c51d4fbdd1144fcab40f3b51
DIFF: https://github.com/llvm/llvm-project/commit/95c0125f2bc610d9c51d4fbdd1144fcab40f3b51.diff
LOG: [Clang][RISCV] Add rvv vsetvl and vsetvlmax intrinsic functions.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D96843
Added:
clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
Modified:
clang/include/clang/Basic/riscv_vector.td
clang/utils/TableGen/RISCVVEmitter.cpp
Removed:
################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index c69b5be1798c..0efe10c94f2e 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -159,7 +159,11 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
// This builtin is valid for the given Log2LMULs.
list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3];
- // Emit the automatic clang codegen. It describes what types we have to use
+ // Manual code emitted into the clang codegen file riscv_vector_builtin_cg.inc.
+ code ManualCodegen = [{}];
+ code ManualCodegenMask = [{}];
+
+ // When emitting the automatic clang codegen, this describes what types we have to use
// to obtain the specific LLVM intrinsic. -1 means the return type, otherwise,
// k >= 0 meaning the k-th operand (counting from zero) of the codegen'd
// parameter of the unmasked version. k can't be the mask operand's position.
@@ -171,6 +175,11 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
// If HasMask, this is the ID of the LLVM intrinsic we want to lower to.
string IRNameMask = NAME #"_mask";
+
+ // If non-empty, this is the code emitted in the header; otherwise,
+ // an automatic definition is emitted in the header.
+ string HeaderCode = "";
+
}
//===----------------------------------------------------------------------===//
@@ -195,6 +204,80 @@ multiclass RVVBinBuiltinSet<string intrinsic_name, string type_range,
}
}
+
+// 6. Configuration-Setting Instructions
+// 6.1. vsetvli/vsetvl instructions
+let HasVL = false,
+ HasMask = false,
+ HasSideEffects = true,
+ HasGeneric = false,
+ Log2LMUL = [0],
+ ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
+{
+ // vsetvl is implemented as macros because the SEW and LMUL arguments must be constant integers.
+ let HeaderCode =
+[{
+#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
+#define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
+#define vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
+#define vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
+#define vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
+#define vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
+#define vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
+
+#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
+#define vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
+#define vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
+#define vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
+#define vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
+#define vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
+
+#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
+#define vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
+#define vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
+#define vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
+#define vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
+
+#define vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
+#define vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
+#define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
+#define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
+
+}] in
+ def vsetvli : RVVBuiltin<"", "zzKzKz", "i">;
+
+ let HeaderCode =
+[{
+#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
+#define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
+#define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
+#define vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
+#define vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
+#define vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
+#define vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
+
+#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
+#define vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
+#define vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
+#define vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
+#define vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
+#define vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
+
+#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
+#define vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
+#define vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
+#define vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
+#define vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
+
+#define vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
+#define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
+#define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
+#define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
+
+}] in
+ def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">;
+}
+
// 12. Vector Integer Arithmetic Instructions
// 12.1. Vector Single-Width Integer Add and Subtract
defm vadd : RVVBinBuiltinSet<"vadd", "csil",
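The SEW and LMUL operands of __builtin_rvv_vsetvli/__builtin_rvv_vsetvlimax are the encodings visible in the macros above (e8/e16/e32/e64 map to 0-3; m1/m2/m4/m8 map to 0-3 and mf8/mf4/mf2 to 5-7), which is why each element-width/LMUL combination gets its own macro with the constants baked in. As a hedged usage sketch, not part of this patch (the function name and loop body are illustrative, and the file must be built with the V extension enabled, e.g. -target-feature +experimental-v as in the tests below), the macros are typically used to strip-mine a loop:

#include <stddef.h>
#include <riscv_vector.h>

// Process n elements in chunks of at most VLMAX elements (SEW=32, LMUL=1).
void strip_mine(size_t n) {
  size_t vlmax = vsetvlmax_e32m1();   // VLMAX for e32/m1; takes no AVL argument
  (void)vlmax;                        // e.g. useful for sizing temporaries
  for (size_t avl = n; avl > 0;) {
    size_t vl = vsetvl_e32m1(avl);    // elements handled this iteration (nonzero while avl > 0)
    // ... e32/m1 vector loads, arithmetic and stores over vl elements ...
    avl -= vl;
  }
}
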
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
new file mode 100644
index 000000000000..e837653304a7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
@@ -0,0 +1,451 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -emit-llvm -o - %s \
+// RUN: | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -emit-llvm -o - %s \
+// RUN: | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -Werror -Wall -o - \
+// RUN: %s > /dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsetvl_e8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 0, i32 0)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 0)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e8m1(size_t avl) {
+ return vsetvl_e8m1(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 0, i32 1)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 1)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e8m2(size_t avl) {
+ return vsetvl_e8m2(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 0, i32 2)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 2)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e8m4(size_t avl) {
+ return vsetvl_e8m4(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 0, i32 3)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 3)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e8m8(size_t avl) {
+ return vsetvl_e8m8(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 0, i32 7)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 7)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e8mf2(size_t avl) {
+ return vsetvl_e8mf2(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 0, i32 6)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 6)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e8mf4(size_t avl) {
+ return vsetvl_e8mf4(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 0, i32 5)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 5)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e8mf8(size_t avl) {
+ return vsetvl_e8mf8(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 1, i32 0)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 0)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e16m1(size_t avl) {
+ return vsetvl_e16m1(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 1, i32 1)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 1)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e16m2(size_t avl) {
+ return vsetvl_e16m2(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 1, i32 2)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 2)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e16m4(size_t avl) {
+ return vsetvl_e16m4(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 1, i32 3)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 3)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e16m8(size_t avl) {
+ return vsetvl_e16m8(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 1, i32 7)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 7)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e16mf2(size_t avl) {
+ return vsetvl_e16mf2(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 1, i32 6)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 6)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e16mf4(size_t avl) {
+ return vsetvl_e16mf4(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 2, i32 0)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 0)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e32m1(size_t avl) {
+ return vsetvl_e32m1(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 2, i32 1)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 1)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e32m2(size_t avl) {
+ return vsetvl_e32m2(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 2, i32 2)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 2)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e32m4(size_t avl) {
+ return vsetvl_e32m4(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 2, i32 3)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 3)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e32m8(size_t avl) {
+ return vsetvl_e32m8(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 2, i32 7)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 7)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e32mf2(size_t avl) {
+ return vsetvl_e32mf2(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 3, i32 0)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 3, i64 0)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e64m1(size_t avl) {
+ return vsetvl_e64m1(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 3, i32 1)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 3, i64 1)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e64m2(size_t avl) {
+ return vsetvl_e64m2(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 3, i32 2)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 3, i64 2)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e64m4(size_t avl) {
+ return vsetvl_e64m4(avl);
+}
+
+// CHECK-RV32-LABEL: @test_vsetvl_e64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[AVL_ADDR:%.*]] = alloca i32, align 4
+// CHECK-RV32-NEXT: store i32 [[AVL:%.*]], i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = load i32, i32* [[AVL_ADDR]], align 4
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 [[TMP0]], i32 3, i32 3)
+// CHECK-RV32-NEXT: ret i32 [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vsetvl_e64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8
+// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 3, i64 3)
+// CHECK-RV64-NEXT: ret i64 [[TMP1]]
+//
+size_t test_vsetvl_e64m8(size_t avl) {
+ return vsetvl_e64m8(avl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
new file mode 100644
index 000000000000..2d2dd6e53174
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
@@ -0,0 +1,319 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -emit-llvm -o - %s \
+// RUN: | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -emit-llvm -o - %s \
+// RUN: | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -Werror -Wall -o - \
+// RUN: %s > /dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 0, i32 0)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 0)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8m1() {
+ return vsetvlmax_e8m1();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 0, i32 1)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 1)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8m2() {
+ return vsetvlmax_e8m2();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 0, i32 2)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 2)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8m4() {
+ return vsetvlmax_e8m4();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 0, i32 3)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 3)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8m8() {
+ return vsetvlmax_e8m8();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 0, i32 7)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 7)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8mf2() {
+ return vsetvlmax_e8mf2();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 0, i32 6)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 6)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8mf4() {
+ return vsetvlmax_e8mf4();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 0, i32 5)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 5)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8mf8() {
+ return vsetvlmax_e8mf8();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 0)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 0)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e16m1() {
+ return vsetvlmax_e16m1();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 1)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e16m2() {
+ return vsetvlmax_e16m2();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 2)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 2)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e16m4() {
+ return vsetvlmax_e16m4();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 3)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 3)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e16m8() {
+ return vsetvlmax_e16m8();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 7)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 7)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e16mf2() {
+ return vsetvlmax_e16mf2();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 6)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 6)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e16mf4() {
+ return vsetvlmax_e16mf4();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 2, i32 0)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 0)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e32m1() {
+ return vsetvlmax_e32m1();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 2, i32 1)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 1)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e32m2() {
+ return vsetvlmax_e32m2();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 2, i32 2)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 2)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e32m4() {
+ return vsetvlmax_e32m4();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 2, i32 3)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 3)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e32m8() {
+ return vsetvlmax_e32m8();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 2, i32 7)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 7)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e32mf2() {
+ return vsetvlmax_e32mf2();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 3, i32 0)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 0)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e64m1() {
+ return vsetvlmax_e64m1();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 3, i32 1)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 1)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e64m2() {
+ return vsetvlmax_e64m2();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 3, i32 2)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 2)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e64m4() {
+ return vsetvlmax_e64m4();
+}
+
+// CHECK-RV32-LABEL: @test_vsetvlmax_e64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 3, i32 3)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsetvlmax_e64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 3)
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e64m8() {
+ return vsetvlmax_e64m8();
+}
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index f2b555a8b05c..3802fee9afb9 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -145,6 +145,8 @@ class RVVIntrinsic {
bool HasMaskedOffOperand;
bool HasVL;
bool HasGeneric;
+ bool HasAutoDef; // There is an automatic definition in the header
+ std::string ManualCodegen;
RVVTypePtr OutputType; // Builtin output type
RVVTypes InputTypes; // Builtin input types
// The types we use to obtain the specific LLVM intrinsic. They are index of
@@ -159,8 +161,8 @@ class RVVIntrinsic {
RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
StringRef IRName, bool HasSideEffects, bool IsMask,
bool HasMaskedOffOperand, bool HasVL, bool HasGeneric,
- const RVVTypes &Types,
- const std::vector<int64_t> &RVVIntrinsicTypes);
+ bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
+ const std::vector<int64_t> &IntrinsicTypes);
~RVVIntrinsic() = default;
StringRef getName() const { return Name; }
@@ -169,6 +171,8 @@ class RVVIntrinsic {
bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
bool hasVL() const { return HasVL; }
bool hasGeneric() const { return HasGeneric; }
+ bool hasManualCodegen() const { return !ManualCodegen.empty(); }
+ bool hasAutoDef() const { return HasAutoDef; }
size_t getNumOperand() const { return InputTypes.size(); }
StringRef getIRName() const { return IRName; }
uint8_t getRISCVExtensions() const { return RISCVExtensions; }
@@ -190,6 +194,7 @@ class RVVIntrinsic {
class RVVEmitter {
private:
RecordKeeper &Records;
+ std::string HeaderCode;
// Concat BasicType, LMUL and Proto as key
StringMap<RVVType> LegalTypes;
StringSet<> IllegalTypes;
@@ -637,11 +642,13 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
StringRef NewMangledName, StringRef IRName,
bool HasSideEffects, bool IsMask,
bool HasMaskedOffOperand, bool HasVL,
- bool HasGeneric, const RVVTypes &OutInTypes,
+ bool HasGeneric, bool HasAutoDef,
+ StringRef ManualCodegen, const RVVTypes &OutInTypes,
const std::vector<int64_t> &NewIntrinsicTypes)
: IRName(IRName), HasSideEffects(HasSideEffects),
HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
- HasGeneric(HasGeneric) {
+ HasGeneric(HasGeneric), HasAutoDef(HasAutoDef),
+ ManualCodegen(ManualCodegen.str()) {
// Init Name and MangledName
Name = NewName.str();
@@ -702,7 +709,13 @@ std::string RVVIntrinsic::getBuiltinTypeStr() const {
}
void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
+
OS << " ID = Intrinsic::riscv_" + getIRName() + ";\n";
+ if (hasManualCodegen()) {
+ OS << ManualCodegen;
+ OS << "break;\n";
+ return;
+ }
OS << " IntrinsicTypes = {";
ListSeparator LS;
for (const auto &Idx : IntrinsicTypes) {
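For vsetvli, the ManualCodegen string defined in riscv_vector.td is just "IntrinsicTypes = {ResultType};", so the body emitted into riscv_vector_builtin_cg.inc reduces to roughly the following sketch (the case label and the surrounding switch live in clang's builtin codegen and are assumed here, not shown in this patch):

case RISCV::BI__builtin_rvv_vsetvli:
  ID = Intrinsic::riscv_vsetvli;
  IntrinsicTypes = {ResultType}; // ManualCodegen body: overload only on the XLEN-sized result
  break;
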
@@ -792,6 +805,11 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
createRVVIntrinsics(Defs);
+ // Print header code
+ if (!HeaderCode.empty()) {
+ OS << HeaderCode;
+ }
+
auto printType = [&](auto T) {
OS << "typedef " << T->getClangBuiltinStr() << " " << T->getTypeStr()
<< ";\n";
@@ -910,7 +928,6 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) {
void RVVEmitter::createRVVIntrinsics(
std::vector<std::unique_ptr<RVVIntrinsic>> &Out) {
-
std::vector<Record *> RV = Records.getAllDerivedDefinitions("RVVBuiltin");
for (auto *R : RV) {
StringRef Name = R->getValueAsString("Name");
@@ -924,11 +941,18 @@ void RVVEmitter::createRVVIntrinsics(
bool HasGeneric = R->getValueAsBit("HasGeneric");
bool HasSideEffects = R->getValueAsBit("HasSideEffects");
std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
+ StringRef ManualCodegen = R->getValueAsString("ManualCodegen");
+ StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask");
std::vector<int64_t> IntrinsicTypes =
R->getValueAsListOfInts("IntrinsicTypes");
StringRef IRName = R->getValueAsString("IRName");
StringRef IRNameMask = R->getValueAsString("IRNameMask");
+ StringRef HeaderCodeStr = R->getValueAsString("HeaderCode");
+ bool HasAutoDef = HeaderCodeStr.empty();
+ if (!HeaderCodeStr.empty()) {
+ HeaderCode += HeaderCodeStr.str();
+ }
// Parse prototype and create a list of primitive type with transformers
// (operand) in ProtoSeq. ProtoSeq[0] is output operand.
SmallVector<std::string, 8> ProtoSeq;
@@ -955,7 +979,7 @@ void RVVEmitter::createRVVIntrinsics(
ProtoMaskSeq.push_back("z");
}
- // Create intrinsics for each type and LMUL.
+ // Create Intrinsics for each type and LMUL.
for (char I : TypeRange) {
for (int Log2LMUL : Log2LMULList) {
Optional<RVVTypes> Types = computeTypes(I, Log2LMUL, ProtoSeq);
@@ -965,11 +989,11 @@ void RVVEmitter::createRVVIntrinsics(
auto SuffixStr =
computeType(I, Log2LMUL, Suffix).getValue()->getShortStr();
- // Create a non-mask intrinsic.
+ // Create a non-mask intrinsic
Out.push_back(std::make_unique<RVVIntrinsic>(
Name, SuffixStr, MangledName, IRName, HasSideEffects,
/*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasGeneric,
- Types.getValue(), IntrinsicTypes));
+ HasAutoDef, ManualCodegen, Types.getValue(), IntrinsicTypes));
if (HasMask) {
// Create a mask intrinsic
Optional<RVVTypes> MaskTypes =
@@ -977,9 +1001,10 @@ void RVVEmitter::createRVVIntrinsics(
Out.push_back(std::make_unique<RVVIntrinsic>(
Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
/*IsMask=*/true, HasMaskedOffOperand, HasVL, HasGeneric,
- MaskTypes.getValue(), IntrinsicTypes));
+ HasAutoDef, ManualCodegenMask, MaskTypes.getValue(),
+ IntrinsicTypes));
}
- } // end for Log2LMUL
+ } // end for Log2LMULList
} // end for TypeRange
}
}
@@ -1039,7 +1064,8 @@ void RVVEmitter::emitArchMacroAndBody(
NeedEndif = emitExtDefStr(CurExt, OS);
PrevExt = CurExt;
}
- PrintBody(OS, *Def);
+ if (Def->hasAutoDef())
+ PrintBody(OS, *Def);
}
if (NeedEndif)
OS << "#endif\n\n";