[clang] [llvm] [RISCV] Support Zvqdotq Codegen and C intrinsics (PR #154915)
Brandon Wu via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 22 02:18:14 PDT 2025
https://github.com/4vtomat created https://github.com/llvm/llvm-project/pull/154915
spec: https://github.com/riscv/riscv-dot-product/tree/main
Note: we pack four int8/uint8 elements in rs1 into a uint32.
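For context, here is a minimal usage sketch (not part of the patch) based on the intrinsic signatures in the added tests. The pack_i8x4 helper and its byte order are assumptions made for illustration only:

  #include <sifive_vector.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Hypothetical helper: pack four int8 weights into the uint32 scalar
     operand. The byte order shown is an assumption. */
  static inline uint32_t pack_i8x4(int8_t b0, int8_t b1, int8_t b2, int8_t b3) {
    return (uint32_t)(uint8_t)b0 | ((uint32_t)(uint8_t)b1 << 8) |
           ((uint32_t)(uint8_t)b2 << 16) | ((uint32_t)(uint8_t)b3 << 24);
  }

  /* Vector-vector form: each 32-bit lane of vd accumulates the dot product
     of the corresponding four int8 elements of vs2 and vs1. */
  vint32m1_t dot_acc_vv(vint32m1_t vd, vint8m1_t vs2, vint8m1_t vs1,
                        size_t vl) {
    return __riscv_vqdot_vv_i32m1(vd, vs2, vs1, vl);
  }

  /* Vector-scalar form: rs1 carries four packed int8 weights that are
     applied to every group of four elements of vs2. */
  vint32m1_t dot_acc_vx(vint32m1_t vd, vint8m1_t vs2, size_t vl) {
    return __riscv_vqdot_vx_i32m1(vd, vs2, pack_i8x4(1, -2, 3, -4), vl);
  }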
>From fc38c1a0f64cbcc2f54bb0fa7f00b84f06316ad5 Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0813 at gmail.com>
Date: Fri, 22 Aug 2025 02:13:52 -0700
Subject: [PATCH] [RISCV] Support Zvqdotq Codegen and C intrinsics
spec: https://github.com/riscv/riscv-dot-product/tree/main
Note: we pack four int8/uint8 elements in rs1 into a uint32.
---
clang/include/clang/Basic/riscv_vector.td | 21 +
.../non-policy/non-overloaded/vqdot_vv.c | 120 +++++
.../non-policy/non-overloaded/vqdot_vx.c | 118 +++++
.../non-policy/non-overloaded/vqdotsu_vv.c | 122 +++++
.../non-policy/non-overloaded/vqdotsu_vx.c | 122 +++++
.../non-policy/non-overloaded/vqdotu_vv.c | 122 +++++
.../non-policy/non-overloaded/vqdotu_vx.c | 122 +++++
.../non-policy/non-overloaded/vqdotus_vx.c | 122 +++++
.../non-policy/overloaded/vqdot_vv.c | 120 +++++
.../non-policy/overloaded/vqdot_vx.c | 118 +++++
.../non-policy/overloaded/vqdotsu_vv.c | 122 +++++
.../non-policy/overloaded/vqdotsu_vx.c | 122 +++++
.../non-policy/overloaded/vqdotu_vv.c | 122 +++++
.../non-policy/overloaded/vqdotu_vx.c | 122 +++++
.../non-policy/overloaded/vqdotus_vx.c | 122 +++++
.../policy/non-overloaded/vqdot_vv.c | 207 ++++++++
.../policy/non-overloaded/vqdot_vx.c | 207 ++++++++
.../policy/non-overloaded/vqdotsu_vv.c | 207 ++++++++
.../policy/non-overloaded/vqdotsu_vx.c | 207 ++++++++
.../policy/non-overloaded/vqdotu_vv.c | 207 ++++++++
.../policy/non-overloaded/vqdotu_vx.c | 207 ++++++++
.../policy/non-overloaded/vqdotus_vx.c | 207 ++++++++
.../policy/overloaded/vqdot_vv.c | 242 +++++++++
.../policy/overloaded/vqdot_vx.c | 238 +++++++++
.../policy/overloaded/vqdotsu_vv.c | 242 +++++++++
.../policy/overloaded/vqdotsu_vx.c | 242 +++++++++
.../policy/overloaded/vqdotu_vv.c | 242 +++++++++
.../policy/overloaded/vqdotu_vx.c | 242 +++++++++
.../policy/overloaded/vqdotus_vx.c | 242 +++++++++
llvm/include/llvm/IR/IntrinsicsRISCV.td | 45 ++
.../lib/Target/RISCV/RISCVInstrInfoZvqdotq.td | 62 +++
llvm/test/CodeGen/RISCV/rvv/vqdot.ll | 468 ++++++++++++++++++
llvm/test/CodeGen/RISCV/rvv/vqdot_su.ll | 468 ++++++++++++++++++
llvm/test/CodeGen/RISCV/rvv/vqdot_u.ll | 468 ++++++++++++++++++
llvm/test/CodeGen/RISCV/rvv/vqdot_us.ll | 236 +++++++++
35 files changed, 6603 insertions(+)
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdot_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdot_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotsu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotsu_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotu_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotus_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdot_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdot_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotsu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotsu_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotu_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotus_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdot_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdot_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotsu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotsu_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotu_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotus_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdot_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdot_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotsu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotsu_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotu_vx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotus_vx.c
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vqdot.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vqdot_su.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vqdot_u.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vqdot_us.ll
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index c1de2bfe4243d..52ab5252e219d 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2547,3 +2547,24 @@ let UnMaskedPolicyScheme = HasPolicyOperand, HasMasked = false in {
defm vsm3me : RVVOutOp1BuiltinSet<"vsm3me", "i", [["vv", "Uv", "UvUvUv"]]>;
}
}
+
+multiclass RVVVQDOTQBuiltinSet<list<list<string>> suffixes_prototypes> {
+ let UnMaskedPolicyScheme = HasPolicyOperand,
+ HasMaskedOffOperand = false,
+ OverloadedName = NAME,
+ Log2LMUL = [-1, 0, 1, 2, 3] in {
+ defm NAME : RVVOutOp1Op2BuiltinSet<NAME, "i", suffixes_prototypes>;
+ }
+}
+
+// Only SEW=32 is defined for zvqdotq so far, and since the inputs are in fact
+// bundles of four 8-bit integers, we use an unsigned type to represent them.
+let RequiredFeatures = ["zvqdotq"] in {
+ defm vqdot : RVVVQDOTQBuiltinSet<[["vv", "v", "vv(FixedSEW:8)v(FixedSEW:8)v"],
+ ["vx", "v", "vv(FixedSEW:8)vUe"]]>;
+ defm vqdotu : RVVVQDOTQBuiltinSet<[["vv", "Uv", "UvUv(FixedSEW:8)Uv(FixedSEW:8)Uv"],
+ ["vx", "Uv", "UvUv(FixedSEW:8)UvUe"]]>;
+ defm vqdotsu : RVVVQDOTQBuiltinSet<[["vv", "v", "vv(FixedSEW:8)v(FixedSEW:8)Uv"],
+ ["vx", "v", "vv(FixedSEW:8)vUe"]]>;
+ defm vqdotus : RVVVQDOTQBuiltinSet<[["vx", "v", "vv(FixedSEW:8)UvUe"]]>;
+}
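For readers unfamiliar with the riscv_vector.td prototype strings, here is a rough reading (mine, not part of the patch) of how the vqdotsu entries above expand at SEW=32/LMUL=1; the authoritative signatures are in the autogenerated tests below. "v" is the signed 32-bit vector, "Uv" its unsigned counterpart, "(FixedSEW:8)v" and "(FixedSEW:8)Uv" the matching 8-bit vectors, and "Ue" an unsigned 32-bit scalar:

  /* "vv(FixedSEW:8)v(FixedSEW:8)Uv": signed i32 accumulator, signed i8 vs2,
     unsigned u8 vs1. */
  vint32m1_t __riscv_vqdotsu_vv_i32m1(vint32m1_t vd, vint8m1_t vs2,
                                      vuint8m1_t vs1, size_t vl);

  /* "vv(FixedSEW:8)vUe": vs1 is replaced by a packed uint32_t scalar. */
  vint32m1_t __riscv_vqdotsu_vx_i32m1(vint32m1_t vd, vint8m1_t vs2,
                                      uint32_t rs1, size_t vl);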
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdot_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdot_vv.c
new file mode 100644
index 0000000000000..b51c62823e60c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdot_vv.c
@@ -0,0 +1,120 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2(vint32mf2_t vd, vint8mf2_t vs2,
+ vint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1(vint32m1_t vd, vint8m1_t vs2,
+ vint8m1_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2(vint32m2_t vd, vint8m2_t vs2,
+ vint8m2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4(vint32m4_t vd, vint8m4_t vs2,
+ vint8m4_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8(vint32m8_t vd, vint8m8_t vs2,
+ vint8m8_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_vv_i32mf2_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_vv_i32m1_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_vv_i32m2_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2,
+ vint8m4_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m4_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2,
+ vint8m8_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m8_m(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdot_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdot_vx.c
new file mode 100644
index 0000000000000..a3f5d9282cf47
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdot_vx.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2(vint32mf2_t vd, vint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32mf2(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1(vint32m1_t vd, vint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_vx_i32m1(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2(vint32m2_t vd, vint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_vx_i32m2(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4(vint32m4_t vd, vint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_vx_i32m4(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8(vint32m8_t vd, vint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_vx_i32m8(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_vx_i32mf2_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m1_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m2_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m4_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m8_m(vm, vd, vs2, rs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotsu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotsu_vv.c
new file mode 100644
index 0000000000000..4246981fbbf05
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotsu_vv.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2(vint32mf2_t vd, vint8mf2_t vs2,
+ vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1(vint32m1_t vd, vint8m1_t vs2,
+ vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2(vint32m2_t vd, vint8m2_t vs2,
+ vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4(vint32m4_t vd, vint8m4_t vs2,
+ vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8(vint32m8_t vd, vint8m8_t vs2,
+ vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vv_i32mf2_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vv_i32m1_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vv_i32m2_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_m(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vv_i32m4_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_m(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vv_i32m8_m(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotsu_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotsu_vx.c
new file mode 100644
index 0000000000000..5f1ff06bff7ca
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotsu_vx.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2(vint32mf2_t vd, vint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32mf2(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1(vint32m1_t vd, vint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m1(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2(vint32m2_t vd, vint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m2(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4(vint32m4_t vd, vint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m4(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8(vint32m8_t vd, vint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m8(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vx_i32mf2_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vx_i32m1_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vx_i32m2_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_m(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vx_i32m4_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_m(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_vx_i32m8_m(vm, vd, vs2, rs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotu_vv.c
new file mode 100644
index 0000000000000..8c8cbfb617f48
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotu_vv.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2(vuint32mf2_t vd, vuint8mf2_t vs2,
+ vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1(vuint32m1_t vd, vuint8m1_t vs2,
+ vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2(vuint32m2_t vd, vuint8m2_t vs2,
+ vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4(vuint32m4_t vd, vuint8m4_t vs2,
+ vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8(vuint32m8_t vd, vuint8m8_t vs2,
+ vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_vv_u32mf2_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_vv_u32m1_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_vv_u32m2_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_vv_u32m4_m(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_vv_u32m8_m(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotu_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotu_vx.c
new file mode 100644
index 0000000000000..afe46fe5f9709
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotu_vx.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2(vuint32mf2_t vd, vuint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32mf2(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1(vuint32m1_t vd, vuint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m1(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2(vuint32m2_t vd, vuint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m2(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4(vuint32m4_t vd, vuint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m4(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8(vuint32m8_t vd, vuint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m8(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_vx_u32mf2_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_vx_u32m1_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_vx_u32m2_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_vx_u32m4_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_vx_u32m8_m(vm, vd, vs2, rs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotus_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotus_vx.c
new file mode 100644
index 0000000000000..070a7378c1285
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vqdotus_vx.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2(vint32mf2_t vd, vuint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32mf2(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1(vint32m1_t vd, vuint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m1(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2(vint32m2_t vd, vuint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m2(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4(vint32m4_t vd, vuint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m4(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8(vint32m8_t vd, vuint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m8(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_vx_i32mf2_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_vx_i32m1_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_vx_i32m2_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_m(vbool8_t vm, vint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_vx_i32m4_m(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_m(vbool4_t vm, vint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_vx_i32m8_m(vm, vd, vs2, rs1, vl);
+}
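
vqdotus only exists in a .vx form, which matches the tests above: vs2 supplies the unsigned bytes, rs1 supplies the signed ones, and the accumulator is the signed vint32 type. The C type of rs1 stays uint32_t throughout because it is just a 32-bit container for four packed 8-bit elements. A short sketch of the unmasked form (only the wrapper name dot_us is invented):

  #include <sifive_vector.h>

  // Per 32-bit lane: vd += dot(4 unsigned bytes of vs2,
  //                            4 signed bytes packed in rs1).
  vint32m1_t dot_us(vint32m1_t vd, vuint8m1_t vs2, uint32_t rs1, size_t vl) {
    return __riscv_vqdotus_vx_i32m1(vd, vs2, rs1, vl);
  }
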
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdot_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdot_vv.c
new file mode 100644
index 0000000000000..46f8cd14da0dd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdot_vv.c
@@ -0,0 +1,120 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2(vint32mf2_t vd, vint8mf2_t vs2,
+ vint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdot(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1(vint32m1_t vd, vint8m1_t vs2,
+ vint8m1_t vs1, size_t vl) {
+ return __riscv_vqdot(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2(vint32m2_t vd, vint8m2_t vs2,
+ vint8m2_t vs1, size_t vl) {
+ return __riscv_vqdot(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4(vint32m4_t vd, vint8m4_t vs2,
+ vint8m4_t vs1, size_t vl) {
+ return __riscv_vqdot(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8(vint32m8_t vd, vint8m8_t vs2,
+ vint8m8_t vs1, size_t vl) {
+ return __riscv_vqdot(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2,
+ vint8m4_t vs1, size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2,
+ vint8m8_t vs1, size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, vs1, vl);
+}
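
The overloaded tests drop the type suffix: every call above is plain __riscv_vqdot, and Clang resolves the i32mf2 through i32m8 forms, masked or not, from the argument types, emitting the same llvm.riscv.vqdot and llvm.riscv.vqdot.mask intrinsics as the non-overloaded file. A sketch of the two shapes, assuming nothing beyond the overloads tested above (the wrapper names are illustrative):

  #include <sifive_vector.h>

  vint32m1_t dot(vint32m1_t vd, vint8m1_t x, vint8m1_t y, size_t vl) {
    return __riscv_vqdot(vd, x, y, vl);        // unmasked .vv overload
  }
  vint32m1_t dot_m(vbool32_t vm, vint32m1_t vd, vint8m1_t x, vint8m1_t y,
                   size_t vl) {
    return __riscv_vqdot(vm, vd, x, y, vl);    // masked overload: mask leads
  }
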
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdot_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdot_vx.c
new file mode 100644
index 0000000000000..275cba041a148
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdot_vx.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2(vint32mf2_t vd, vint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1(vint32m1_t vd, vint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2(vint32m2_t vd, vint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4(vint32m4_t vd, vint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8(vint32m8_t vd, vint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot(vm, vd, vs2, rs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotsu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotsu_vv.c
new file mode 100644
index 0000000000000..a1cc1cfa6e44d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotsu_vv.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2(vint32mf2_t vd, vint8mf2_t vs2,
+ vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1(vint32m1_t vd, vint8m1_t vs2,
+ vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2(vint32m2_t vd, vint8m2_t vs2,
+ vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4(vint32m4_t vd, vint8m4_t vs2,
+ vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8(vint32m8_t vd, vint8m8_t vs2,
+ vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_m(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_m(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, vs1, vl);
+}
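
vqdotsu fixes the signedness by position, signed vs2 against unsigned vs1; as the CHECK lines show, both operands are plain i8 element vectors at the IR level, so the signedness is carried only by the intrinsic name. A sketch using the overload tested above (wrapper name invented):

  #include <sifive_vector.h>

  // Signed values in s against unsigned values in u.
  vint32m1_t dot_su(vint32m1_t acc, vint8m1_t s, vuint8m1_t u, size_t vl) {
    return __riscv_vqdotsu(acc, s, u, vl);
  }
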
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotsu_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotsu_vx.c
new file mode 100644
index 0000000000000..8c940c74e4069
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotsu_vx.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2(vint32mf2_t vd, vint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1(vint32m1_t vd, vint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2(vint32m2_t vd, vint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4(vint32m4_t vd, vint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8(vint32m8_t vd, vint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_m(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_m(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu(vm, vd, vs2, rs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotu_vv.c
new file mode 100644
index 0000000000000..51f86b504438c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotu_vv.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2(vuint32mf2_t vd, vuint8mf2_t vs2,
+ vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1(vuint32m1_t vd, vuint8m1_t vs2,
+ vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2(vuint32m2_t vd, vuint8m2_t vs2,
+ vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4(vuint32m4_t vd, vuint8m4_t vs2,
+ vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8(vuint32m8_t vd, vuint8m8_t vs2,
+ vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, vs1, vl);
+}
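
vqdotu is the all-unsigned variant, so the accumulator moves to the vuint32 types and the test names switch from i32 to u32. A one-line sketch from the tested overloads (wrapper name invented):

  #include <sifive_vector.h>

  vuint32m1_t dot_u(vuint32m1_t acc, vuint8m1_t x, vuint8m1_t y, size_t vl) {
    return __riscv_vqdotu(acc, x, y, vl);
  }
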
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotu_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotu_vx.c
new file mode 100644
index 0000000000000..2ddd2ef129519
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotu_vx.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2(vuint32mf2_t vd, vuint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1(vuint32m1_t vd, vuint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2(vuint32m2_t vd, vuint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4(vuint32m4_t vd, vuint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8(vuint32m8_t vd, vuint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu(vm, vd, vs2, rs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotus_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotus_vx.c
new file mode 100644
index 0000000000000..148746f2d48aa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vqdotus_vx.c
@@ -0,0 +1,122 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2(vint32mf2_t vd, vuint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1(vint32m1_t vd, vuint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2(vint32m2_t vd, vuint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4(vint32m4_t vd, vuint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8(vint32m8_t vd, vuint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_m(vbool32_t vm, vint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_m(vbool16_t vm, vint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_m(vbool8_t vm, vint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_m(vbool4_t vm, vint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus(vm, vd, vs2, rs1, vl);
+}
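
Before the explicit-policy tests below, a word on the operation these
vqdotus checks exercise: each 32-bit lane of vd accumulates the dot product
of four unsigned bytes from vs2 with four signed bytes packed into the rs1
scalar. A minimal scalar model of one lane, assuming the riscv-dot-product
spec semantics (the helper name is illustrative, not part of this patch):

    #include <stdint.h>

    /* One 32-bit accumulator lane of vqdotus.vx: vs2_lane holds four
     * unsigned bytes, rs1 packs four signed bytes into a uint32_t. */
    static int32_t vqdotus_ref_lane(int32_t vd, uint32_t vs2_lane,
                                    uint32_t rs1) {
      int32_t acc = vd;
      for (int b = 0; b < 4; ++b) {
        uint8_t u = (uint8_t)(vs2_lane >> (8 * b));   /* unsigned element */
        int8_t s = (int8_t)(uint8_t)(rs1 >> (8 * b)); /* signed element */
        acc += (int32_t)u * (int32_t)s; /* widening multiply-accumulate */
      }
      return acc;
    }
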
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdot_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdot_vv.c
new file mode 100644
index 0000000000000..4103f00cc56b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdot_vv.c
@@ -0,0 +1,207 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_tu(vint32mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_tu(vint32m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_tu(vint32m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_tu(vint32m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_tu(vint32m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vqdot_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
+}
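
For reference, the trailing i64 immediate in the calls above is the
tail/mask policy operand. Reading the values off these checks: bit 0 set
means tail-agnostic and bit 1 set means mask-agnostic, so _tu and _tum
lower to 2, _tumu to 0, _mu to 1, and the non-policy masked forms earlier
in the patch to 3. As a compact sketch (the constant names are
illustrative):

    /* Policy immediate passed as the last operand of the intrinsic. */
    enum {
      POLICY_TAIL_AGNOSTIC = 1 << 0, /* tail lanes may be clobbered */
      POLICY_MASK_AGNOSTIC = 1 << 1, /* inactive lanes may be clobbered */
    };
    /* _tumu -> 0, _mu -> 1, _tu/_tum -> 2, unsuffixed masked -> 3 */
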
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdot_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdot_vx.c
new file mode 100644
index 0000000000000..fc2089142e4e3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdot_vx.c
@@ -0,0 +1,207 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_tu(vint32mf2_t vd, vint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_tu(vint32m1_t vd, vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_tu(vint32m2_t vd, vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_tu(vint32m4_t vd, vint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_tu(vint32m8_t vd, vint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
+}
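
The .vx forms above take their second source as a uint32_t rs1 rather than
a vector; per the dot-product spec linked in the description, that scalar
carries four packed 8-bit elements which are dotted against each 32-bit
lane of vs2. A small packing helper, as a sketch only (the helper and its
byte order, element 0 in the least-significant byte, are assumptions, not
part of the intrinsics API):

    #include <stdint.h>

    /* Pack four int8 values into the uint32_t rs1 operand. */
    static inline uint32_t pack4_i8(int8_t a0, int8_t a1, int8_t a2,
                                    int8_t a3) {
      return (uint32_t)(uint8_t)a0 | ((uint32_t)(uint8_t)a1 << 8) |
             ((uint32_t)(uint8_t)a2 << 16) | ((uint32_t)(uint8_t)a3 << 24);
    }
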
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotsu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotsu_vv.c
new file mode 100644
index 0000000000000..23a873dcca1e7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotsu_vv.c
@@ -0,0 +1,207 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_tu(vint32mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_tu(vint32m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_tu(vint32m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_tu(vint32m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_tu(vint32m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotsu_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
+}
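
To show how the _tu form tested above composes in practice, here is a
strip-mined accumulation sketch. It assumes riscv_vector.h declares these
intrinsics, that n is a multiple of 4, and that vl counts 32-bit
accumulator lanes (four input bytes per lane); the AVL convention should be
confirmed against the spec:

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    /* acc[lane] += dot(4 signed bytes of a, 4 unsigned bytes of b). */
    vint32m1_t dot_su(vint32m1_t acc, const int8_t *a, const uint8_t *b,
                      size_t n) {
      for (size_t i = 0; i < n / 4;) {
        size_t vl = __riscv_vsetvl_e32m1(n / 4 - i); /* i32 lanes */
        vint8m1_t va = __riscv_vle8_v_i8m1(a + 4 * i, 4 * vl);
        vuint8m1_t vb = __riscv_vle8_v_u8m1(b + 4 * i, 4 * vl);
        acc = __riscv_vqdotsu_vv_i32m1_tu(acc, va, vb, vl);
        i += vl;
      }
      return acc;
    }
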
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotsu_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotsu_vx.c
new file mode 100644
index 0000000000000..5cdf45978070f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotsu_vx.c
@@ -0,0 +1,207 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_tu(vint32mf2_t vd, vint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_tu(vint32m1_t vd, vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_tu(vint32m2_t vd, vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_tu(vint32m4_t vd, vint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_tu(vint32m8_t vd, vint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
+}
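
For readers working through the generated checks above, a minimal usage sketch of the packed-scalar (.vx) form — illustrative only, not part of the patch. The four 8-bit multiplicands are packed into a single uint32_t carrier, and the _tu variant leaves tail elements of the accumulator undisturbed. The helper name pack4_u8 is hypothetical, and byte 0 is assumed to land in bits [7:0] of rs1:

#include <stdint.h>
#include <stddef.h>
#include <sifive_vector.h>

/* Hypothetical helper: pack four unsigned bytes into the uint32_t scalar
   operand of the .vx forms; byte 0 assumed in bits [7:0]. */
static inline uint32_t pack4_u8(uint8_t b0, uint8_t b1, uint8_t b2,
                                uint8_t b3) {
  return (uint32_t)b0 | ((uint32_t)b1 << 8) | ((uint32_t)b2 << 16) |
         ((uint32_t)b3 << 24);
}

/* Accumulate signed(vs2) x unsigned(packed rs1) dot products into acc.
   Tail-undisturbed: elements past vl keep their previous value. */
vint32m1_t dot_acc(vint32m1_t acc, vint8m1_t vs2, uint8_t w0, uint8_t w1,
                   uint8_t w2, uint8_t w3, size_t vl) {
  return __riscv_vqdotsu_vx_i32m1_tu(acc, vs2, pack4_u8(w0, w1, w2, w3), vl);
}
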
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotu_vv.c
new file mode 100644
index 0000000000000..455dfd33a8350
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotu_vv.c
@@ -0,0 +1,207 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_tu(vuint32mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_tu(vuint32m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_tu(vuint32m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_tu(vuint32m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_tu(vuint32m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotu_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
+}
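
A note on the trailing i64 immediate in the checks above: it encodes the RVV policy bits (bit 0 set = tail agnostic, bit 1 set = mask agnostic), which is why the _tu and _tum variants lower to 2, _mu to 1, and _tumu to 0. A masked-usage sketch, illustrative only and not part of the patch:

#include <stddef.h>
#include <sifive_vector.h>

/* Mask-undisturbed accumulation: where vm is 0, the corresponding element
   of acc is carried through unchanged; active elements receive the
   unsigned dot-product accumulation. */
vuint32m1_t masked_dot(vbool32_t vm, vuint32m1_t acc, vuint8m1_t vs2,
                       vuint8m1_t vs1, size_t vl) {
  return __riscv_vqdotu_vv_u32m1_mu(vm, acc, vs2, vs1, vl);
}
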
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotu_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotu_vx.c
new file mode 100644
index 0000000000000..f254e752549fe
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotu_vx.c
@@ -0,0 +1,207 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_tu(vuint32mf2_t vd, vuint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_tu(vuint32m1_t vd, vuint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_tu(vuint32m2_t vd, vuint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_tu(vuint32m4_t vd, vuint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_tu(vuint32m8_t vd, vuint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
+}
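
Semantically, each 32-bit accumulator element consumes four bytes of vs2 and the four bytes of the packed scalar. A scalar reference model for the unsigned form, under the same byte-order assumption as the packing sketch earlier — a sketch of the intended semantics, not a drop-in replacement:

#include <stdint.h>

/* Scalar model of vqdotu.vx for one 32-bit lane: acc += sum over j of
   vs2_byte[j] * rs1_byte[j], with all operands zero-extended. */
uint32_t vqdotu_vx_lane(uint32_t acc, const uint8_t vs2_bytes[4],
                        uint32_t rs1) {
  for (int j = 0; j < 4; ++j)
    acc += (uint32_t)vs2_bytes[j] * ((rs1 >> (8 * j)) & 0xff);
  return acc;
}
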
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotus_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotus_vx.c
new file mode 100644
index 0000000000000..7ffcc1c3efd15
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vqdotus_vx.c
@@ -0,0 +1,207 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_tu(vint32mf2_t vd, vuint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_tu(vint32m1_t vd, vuint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_tu(vint32m2_t vd, vuint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_tu(vint32m4_t vd, vuint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_tu(vint32m8_t vd, vuint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vuint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vuint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vuint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vuint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vuint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vuint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vuint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vuint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vuint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vuint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vuint8mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vuint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vuint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vuint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vuint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
+}
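
vqdotus mirrors vqdotsu with the signedness swapped: vs2 holds unsigned bytes while the packed rs1 bytes are treated as signed, even though the carrier type stays uint32_t, as the tests above show. A packing sketch for signed weights, illustrative only (pack4_i8 is hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <sifive_vector.h>

/* Hypothetical helper: pack four signed bytes into the uint32_t carrier;
   each byte is reinterpreted bit-for-bit, not value-converted. */
static inline uint32_t pack4_i8(int8_t b0, int8_t b1, int8_t b2, int8_t b3) {
  return (uint32_t)(uint8_t)b0 | ((uint32_t)(uint8_t)b1 << 8) |
         ((uint32_t)(uint8_t)b2 << 16) | ((uint32_t)(uint8_t)b3 << 24);
}

/* Unsigned(vs2) x signed(packed rs1) accumulation into a signed acc. */
vint32m1_t dot_us(vint32m1_t acc, vuint8m1_t vs2, int8_t w0, int8_t w1,
                  int8_t w2, int8_t w3, size_t vl) {
  return __riscv_vqdotus_vx_i32m1_tu(acc, vs2, pack4_i8(w0, w1, w2, w3), vl);
}
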
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdot_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdot_vv.c
new file mode 100644
index 0000000000000..7eafe584ff892
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdot_vv.c
@@ -0,0 +1,242 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_tu(vint32mf2_t vd, vint8mf2_t vs2,
+ vint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_tu(vint32m1_t vd, vint8m1_t vs2,
+ vint8m1_t vs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_tu(vint32m2_t vd, vint8m2_t vs2,
+ vint8m2_t vs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_tu(vint32m4_t vd, vint8m4_t vs2,
+ vint8m4_t vs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_tu(vint32m8_t vd, vint8m8_t vs2,
+ vint8m8_t vs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, vint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, vint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, vint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, vint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vv_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vv_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vv_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vv_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, vint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vv_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, vint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdot_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdot_vx.c
new file mode 100644
index 0000000000000..7429c79746630
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdot_vx.c
@@ -0,0 +1,238 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_tu(vint32mf2_t vd, vint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_tu(vint32m1_t vd, vint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_tu(vint32m2_t vd, vint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_tu(vint32m4_t vd, vint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_tu(vint32m8_t vd, vint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdot_vx_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdot_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdot_vx_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdot_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdot_vx_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdot_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdot_vx_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdot_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdot_vx_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdot_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vqdot_mu(vm, vd, vs2, rs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotsu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotsu_vv.c
new file mode 100644
index 0000000000000..9d002876b3419
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotsu_vv.c
@@ -0,0 +1,242 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_tu(vint32mf2_t vd, vint8mf2_t vs2,
+ vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_tu(vint32m1_t vd, vint8m1_t vs2,
+ vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_tu(vint32m2_t vd, vint8m2_t vs2,
+ vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_tu(vint32m4_t vd, vint8m4_t vs2,
+ vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_tu(vint32m8_t vd, vint8m8_t vs2,
+ vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vv_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vv_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vv_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vv_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vv_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotsu_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotsu_vx.c
new file mode 100644
index 0000000000000..d85d3324622d8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotsu_vx.c
@@ -0,0 +1,242 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_tu(vint32mf2_t vd, vint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_tu(vint32m1_t vd, vint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_tu(vint32m2_t vd, vint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_tu(vint32m4_t vd, vint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_tu(vint32m8_t vd, vint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotsu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotsu_vx_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+ vint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotsu_vx_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd,
+ vint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotsu_vx_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd,
+ vint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotsu_vx_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd,
+ vint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotsu_vx_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd,
+ vint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotsu_mu(vm, vd, vs2, rs1, vl);
+}
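
(A note for readers of the autogenerated checks in these tests: the trailing
immediate on each intrinsic call is LLVM's standard RVV policy encoding, not
something introduced by this patch: 0 is tail-undisturbed/mask-undisturbed
(_tumu), 1 is tail-agnostic/mask-undisturbed (_mu), and 2 is
tail-undisturbed/mask-agnostic (_tu and _tum), matching the values above.)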
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotu_vv.c
new file mode 100644
index 0000000000000..eb83676a36dd8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotu_vv.c
@@ -0,0 +1,242 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_tu(vuint32mf2_t vd, vuint8mf2_t vs2,
+ vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_tu(vuint32m1_t vd, vuint8m1_t vs2,
+ vuint8m1_t vs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_tu(vuint32m2_t vd, vuint8m2_t vs2,
+ vuint8m2_t vs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_tu(vuint32m4_t vd, vuint8m4_t vs2,
+ vuint8m4_t vs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_tu(vuint32m8_t vd, vuint8m8_t vs2,
+ vuint8m8_t vs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vv_u32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, vuint8mf2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vv_u32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, vuint8m1_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vv_u32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, vuint8m2_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vv_u32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, vuint8m4_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vv_u32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, vuint8m8_t vs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, vs1, vl);
+}
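
To make the .vv semantics concrete, here is a minimal usage sketch (not part
of the patch; it assumes only the header and the overloaded _tu intrinsic
exercised by the tests above). Each 32-bit lane of the accumulator receives
the dot product of the corresponding four bytes of the two byte-vector
sources:

  #include <sifive_vector.h>

  // Sketch: accumulate the 4-byte-group dot products of a and b into acc.
  // Tail-undisturbed, so lanes past vl keep their previous accumulator value.
  vuint32m1_t dot_accumulate(vuint32m1_t acc, vuint8m1_t a, vuint8m1_t b,
                             size_t vl) {
    return __riscv_vqdotu_tu(acc, a, b, vl);
  }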
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotu_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotu_vx.c
new file mode 100644
index 0000000000000..9a46ab2cb5737
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotu_vx.c
@@ -0,0 +1,242 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_tu(vuint32mf2_t vd, vuint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_tu(vuint32m1_t vd, vuint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_tu(vuint32m2_t vd, vuint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_tu(vuint32m4_t vd, vuint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_tu(vuint32m8_t vd, vuint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotu_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotu_vx_u32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vqdotu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotu_vx_u32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vqdotu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotu_vx_u32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vqdotu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotu_vx_u32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vqdotu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotu_vx_u32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vqdotu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotu_mu(vm, vd, vs2, rs1, vl);
+}
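
The .vx forms above take the scalar as a uint32_t even though the arithmetic
is on 8-bit elements, i.e. rs1 carries four packed 8-bit multiplicands that
are reused against every 4-byte group of vs2. A hedged sketch under that
reading (the packing/byte-order rule is the spec's, not spelled out here):

  #include <sifive_vector.h>

  // Sketch: dot every 4-byte group of bytes against one set of four 8-bit
  // coefficients packed into a scalar, accumulating into acc.
  vuint32m1_t dot_coeffs(vuint32m1_t acc, vuint8m1_t bytes, uint32_t coeffs,
                         size_t vl) {
    return __riscv_vqdotu_tu(acc, bytes, coeffs, vl);
  }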
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotus_vx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotus_vx.c
new file mode 100644
index 0000000000000..62fbe498cb5c2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vqdotus_vx.c
@@ -0,0 +1,242 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvqdotq -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <sifive_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_tu(vint32mf2_t vd, vuint8mf2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_tu(vint32m1_t vd, vuint8m1_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_tu(vint32m2_t vd, vuint8m2_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_tu(vint32m4_t vd, vuint8m4_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_tu(vint32m8_t vd, vuint8m8_t vs2,
+ uint32_t rs1, size_t vl) {
+ return __riscv_vqdotus_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vqdotus_vx_i32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.nxv4i8.i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 4 x i8> [[VS2]], i32 [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vqdotus_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+ vuint8mf2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vqdotus_vx_i32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.nxv8i8.i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 8 x i8> [[VS2]], i32 [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vqdotus_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd,
+ vuint8m1_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vqdotus_vx_i32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.nxv16i8.i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 16 x i8> [[VS2]], i32 [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vqdotus_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd,
+ vuint8m2_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vqdotus_vx_i32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.nxv32i8.i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 32 x i8> [[VS2]], i32 [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vqdotus_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd,
+ vuint8m4_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vqdotus_vx_i32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 noundef signext [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.nxv64i8.i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 64 x i8> [[VS2]], i32 [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vqdotus_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd,
+ vuint8m8_t vs2, uint32_t rs1,
+ size_t vl) {
+ return __riscv_vqdotus_mu(vm, vd, vs2, rs1, vl);
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 1784454f1b73a..39a6a2b35aefd 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1907,6 +1907,51 @@ let TargetPrefix = "riscv" in {
def int_riscv_vsm3me : RISCVBinaryAAXUnMasked;
} // TargetPrefix = "riscv"
+//===----------------------------------------------------------------------===//
+// Zvqdotq - Vector Quad Widening 4D Dot Product
+//
+// 8-bit integer dot-product instructions that compute the dot product of two
+// groups of four 8-bit integer elements and accumulate the result into a
+// 32-bit integer accumulator.
+let TargetPrefix = "riscv" in {
+ // We use llvm_anyvector_ty and llvm_anyint_ty for future extensibility,
+ // but only EEW=32 is defined for now.
+ // Input: (vector_in, vector_in, vector_in/scalar_in, vl, policy)
+ class RISCVVQDOTUnMasked<bit HasVV>
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty,
+ !if(HasVV, llvm_any_ty, llvm_anyint_ty),
+ llvm_anyint_ty, LLVMMatchType<3>],
+ [ImmArg<ArgIndex<4>>, IntrNoMem]>,
+ RISCVVIntrinsic {
+ let ScalarOperand = 2;
+ let VLOperand = 3;
+ }
+ // Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy)
+ class RISCVVQDOTMasked<bit HasVV>
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty,
+ !if(HasVV, llvm_any_ty, llvm_anyint_ty),
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_anyint_ty, LLVMMatchType<3>],
+ [ImmArg<ArgIndex<5>>, IntrNoMem]>,
+ RISCVVIntrinsic {
+ let ScalarOperand = 2;
+ let VLOperand = 4;
+ }
+
+ multiclass RISCVVQDOT<bit HasVV = 1> {
+ def "int_riscv_" # NAME : RISCVVQDOTUnMasked<HasVV=HasVV>;
+ def "int_riscv_" # NAME # "_mask" : RISCVVQDOTMasked<HasVV=HasVV>;
+ }
+
+ defm vqdot : RISCVVQDOT;
+ defm vqdotu : RISCVVQDOT;
+ defm vqdotsu : RISCVVQDOT;
+ defm vqdotus : RISCVVQDOT<HasVV=0>;
+} // TargetPrefix = "riscv"
+
// Zihintpause extensions
//===----------------------------------------------------------------------===//
let TargetPrefix = "riscv" in
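
For orientation on the operand layouts declared above: the unmasked flavor
ends with (vl, policy), while the masked flavor inserts the mask between the
vector/scalar source and vl, keeping the policy immediate last, exactly as
the autogenerated clang checks show. A small C sketch (reusing the overloaded
_tumu spelling from the tests) that exercises the masked flavor:

  #include <sifive_vector.h>

  // Sketch: masked, tail-undisturbed/mask-undisturbed accumulation. This
  // lowers to @llvm.riscv.vqdotsu.mask.* with the mask before vl and a
  // trailing policy immediate of 0.
  vint32m1_t masked_dot(vbool32_t vm, vint32m1_t acc, vint8m1_t a,
                        uint32_t packed_b, size_t vl) {
    return __riscv_vqdotsu_tumu(vm, acc, a, packed_b, vl);
  }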
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvqdotq.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvqdotq.td
index 27959eaccd904..85cda4752d3b5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvqdotq.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvqdotq.td
@@ -52,6 +52,10 @@ let Predicates = [HasStdExtZvqdotq], mayLoad = 0, mayStore = 0,
defm PseudoVQDOT : VPseudoVQDOT_VV_VX;
defm PseudoVQDOTU : VPseudoVQDOT_VV_VX;
defm PseudoVQDOTSU : VPseudoVQDOT_VV_VX;
+ // VQDOTUS does not have a VV variant
+ foreach m = MxListVF4 in {
+ defm "PseudoVQDOTUS_VX" : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, GPR, m>;
+ }
}
defvar AllE32Vectors = [VI32MF2, VI32M1, VI32M2, VI32M4, VI32M8];
@@ -59,3 +63,61 @@ defm : VPatBinaryVL_VV_VX<riscv_vqdot_vl, "PseudoVQDOT", AllE32Vectors>;
defm : VPatBinaryVL_VV_VX<riscv_vqdotu_vl, "PseudoVQDOTU", AllE32Vectors>;
defm : VPatBinaryVL_VV_VX<riscv_vqdotsu_vl, "PseudoVQDOTSU", AllE32Vectors>;
+// These VPat definitions cover the vqdot family, whose operand order differs
+// from that of other ternary instructions (i.e. vop.vx vd, vs2, rs1).
+multiclass VPatTernaryV_VX_AAXASwapped<string intrinsic, string instruction,
+ list<VTypeInfoToWide> info_pairs> {
+ foreach pair = info_pairs in {
+ defvar VdInfo = pair.Wti;
+ defvar Vs2Info = pair.Vti;
+ let Predicates = GetVTypePredicates<VdInfo>.Predicates in
+ // op1 and op2 are swapped here relative to the generic ternary pattern
+ defm : VPatTernaryWithPolicy<intrinsic, instruction,
+ "V"#VdInfo.ScalarSuffix,
+ VdInfo.Vector, Vs2Info.Vector, Vs2Info.Scalar,
+ VdInfo.Mask, VdInfo.Log2SEW, VdInfo.LMul,
+ VdInfo.RegClass, Vs2Info.RegClass,
+ Vs2Info.ScalarRegClass>;
+ }
+}
+
+multiclass VPatTernaryV_VV_AAXASwapped<string intrinsic, string instruction,
+ list<VTypeInfoToWide> info_pairs> {
+ foreach pair = info_pairs in {
+ defvar VdInfo = pair.Wti;
+ defvar Vs2Info = pair.Vti;
+ let Predicates = GetVTypePredicates<VdInfo>.Predicates in
+ // op1 and op2 are swapped here relative to the generic ternary pattern
+ defm : VPatTernaryWithPolicy<intrinsic, instruction,
+ "VV",
+ VdInfo.Vector, Vs2Info.Vector, Vs2Info.Vector,
+ VdInfo.Mask, VdInfo.Log2SEW, VdInfo.LMul,
+ VdInfo.RegClass, Vs2Info.RegClass,
+ Vs2Info.RegClass>;
+ }
+}
+
+multiclass VPatTernaryV_VV_VX_AAXASwapped<string intrinsic, string instruction,
+ list<VTypeInfoToWide> info_pairs>
+ // Keep the original VV form; with two vector sources the order doesn't matter
+ : VPatTernaryV_VV_AAXASwapped<intrinsic, instruction, info_pairs>,
+ VPatTernaryV_VX_AAXASwapped<intrinsic, instruction, info_pairs>;
+
+defset list<VTypeInfoToWide> VQDOTInfoPairs = {
+ def : VTypeInfoToWide<VI8MF2, VI32MF2>;
+ def : VTypeInfoToWide<VI8M1, VI32M1>;
+ def : VTypeInfoToWide<VI8M2, VI32M2>;
+ def : VTypeInfoToWide<VI8M4, VI32M4>;
+ def : VTypeInfoToWide<VI8M8, VI32M8>;
+}
+
+let Predicates = [HasStdExtZvqdotq] in {
+ defm : VPatTernaryV_VV_VX_AAXASwapped<"int_riscv_vqdot", "PseudoVQDOT",
+ VQDOTInfoPairs>;
+ defm : VPatTernaryV_VV_VX_AAXASwapped<"int_riscv_vqdotu", "PseudoVQDOTU",
+ VQDOTInfoPairs>;
+ defm : VPatTernaryV_VV_VX_AAXASwapped<"int_riscv_vqdotsu", "PseudoVQDOTSU",
+ VQDOTInfoPairs>;
+ defm : VPatTernaryV_VX_AAXASwapped<"int_riscv_vqdotus", "PseudoVQDOTUS",
+ VQDOTInfoPairs>;
+}
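
Concretely, the swap that the helpers above encode: a conventional ternary op
takes the scalar first (e.g. vmacc.vx vd, rs1, vs2), while the zvqdotq
instructions take the vector source first (vqdot.vx vd, vs2, rs1), so the
generic VPatTernaryWithPolicy operands have to be emitted in the opposite
order for the .vx forms.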
diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdot.ll b/llvm/test/CodeGen/RISCV/rvv/vqdot.ll
new file mode 100644
index 0000000000000..b3dfcfa4973ef
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vqdot.ll
@@ -0,0 +1,468 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvqdotq \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvqdotq \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdot_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vv_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vqdot.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdot_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vv_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vqdot.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdot_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vv_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vqdot.vv v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdot_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vv_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vqdot.vv v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdot_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vv_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT: vqdot.vv v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdot_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 1 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vqdot.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 1 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdot_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vqdot.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdot_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 4 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vqdot.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 4 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdot_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 8 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vqdot.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 8 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdot_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 16 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT: vqdot.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 16 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdot_vx_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vx_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: vqdot.vx v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdot.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdot_vx_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vx_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: vqdot.vx v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdot.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdot_vx_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vx_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT: vqdot.vx v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdot.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdot_vx_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vx_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT: vqdot.vx v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdot.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdot_vx_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_vx_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT: vqdot.vx v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdot.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ i32,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdot_mask_vx_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 1 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vqdot.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdot.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2,
+ <vscale x 1 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ i32,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdot_mask_vx_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 2 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vqdot.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdot.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2,
+ <vscale x 2 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ i32,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdot_mask_vx_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 4 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vqdot.vx v8, v10, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdot.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2,
+ <vscale x 4 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ i32,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdot_mask_vx_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 8 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vqdot.vx v8, v12, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdot.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2,
+ <vscale x 8 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ i32,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdot_mask_vx_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 16 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT: vqdot.vx v8, v16, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdot.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2,
+ <vscale x 16 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
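
The masked IR tests above pass a trailing policy operand of 0, which is why
the checks show a "tu, mu" vsetvli: tail and inactive lanes keep the old
accumulator values. On the C side this corresponds to a _m variant; a
hedged sketch, assuming the usual RVV convention of passing the mask first
(the exact name is an assumption, not taken from this patch):

    #include <riscv_vector.h>
    #include <stddef.h>

    // Masked accumulate; under the undisturbed policy used in the IR
    // tests, lanes where m is clear keep acc's old value.
    vint32m1_t dot_acc_masked(vbool32_t m, vint32m1_t acc, vint8m1_t a,
                              vint8m1_t b, size_t vl) {
      return __riscv_vqdot_vv_i32m1_m(m, acc, a, b, vl);
    }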
diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdot_su.ll b/llvm/test/CodeGen/RISCV/rvv/vqdot_su.ll
new file mode 100644
index 0000000000000..430a7aabb19da
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vqdot_su.ll
@@ -0,0 +1,468 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvqdotq \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvqdotq \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotsu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vqdotsu.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotsu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vqdotsu.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotsu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vqdotsu.vv v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotsu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vqdotsu.vv v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotsu_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT: vqdotsu.vv v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotsu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 1 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vqdotsu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 1 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotsu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vqdotsu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotsu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 4 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vqdotsu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 4 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotsu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 8 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vqdotsu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 8 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotsu_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 16 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT: vqdotsu.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 16 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotsu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: vqdotsu.vx v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotsu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: vqdotsu.vx v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotsu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT: vqdotsu.vx v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotsu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT: vqdotsu.vx v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotsu_vx_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT: vqdotsu.vx v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ i32,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotsu_mask_vx_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 1 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vqdotsu.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotsu.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2,
+ <vscale x 1 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ i32,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotsu_mask_vx_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 2 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vqdotsu.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotsu.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2,
+ <vscale x 2 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ i32,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotsu_mask_vx_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 4 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vqdotsu.vx v8, v10, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotsu.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2,
+ <vscale x 4 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ i32,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotsu_mask_vx_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 8 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vqdotsu.vx v8, v12, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotsu.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2,
+ <vscale x 8 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ i32,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotsu_mask_vx_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 16 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT: vqdotsu.vx v8, v16, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotsu.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2,
+ <vscale x 16 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdot_u.ll b/llvm/test/CodeGen/RISCV/rvv/vqdot_u.ll
new file mode 100644
index 0000000000000..6e8dcec57b396
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vqdot_u.ll
@@ -0,0 +1,468 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvqdotq \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvqdotq \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vv_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vqdotu.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vv_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vqdotu.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vv_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vqdotu.vv v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vv_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vqdotu.vv v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotu_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vv_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT: vqdotu.vv v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 1 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vqdotu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 1 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vqdotu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 4 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vqdotu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 4 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 8 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vqdotu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 8 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotu_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 16 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT: vqdotu.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 16 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vx_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: vqdotu.vx v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vx_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: vqdotu.vx v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vx_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT: vqdotu.vx v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vx_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT: vqdotu.vx v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotu_vx_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_vx_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT: vqdotu.vx v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotu.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ i32,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotu_mask_vx_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 1 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vqdotu.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotu.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2,
+ <vscale x 1 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ i32,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotu_mask_vx_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 2 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vqdotu.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotu.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2,
+ <vscale x 2 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ i32,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotu_mask_vx_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 4 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vqdotu.vx v8, v10, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotu.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2,
+ <vscale x 4 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ i32,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotu_mask_vx_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 8 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vqdotu.vx v8, v12, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotu.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2,
+ <vscale x 8 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ i32,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotu_mask_vx_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 16 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT: vqdotu.vx v8, v16, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotu.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2,
+ <vscale x 16 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdot_us.ll b/llvm/test/CodeGen/RISCV/rvv/vqdot_us.ll
new file mode 100644
index 0000000000000..395cfa2c83162
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vqdot_us.ll
@@ -0,0 +1,236 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvqdotq \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvqdotq \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vqdotus.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotus_vx_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_vx_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: vqdotus.vx v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotus.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotus.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotus_vx_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_vx_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: vqdotus.vx v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotus.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotus.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotus_vx_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_vx_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT: vqdotus.vx v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotus.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotus.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotus_vx_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_vx_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT: vqdotus.vx v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotus.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotus.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ i32,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotus_vx_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, i32 %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_vx_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT: vqdotus.vx v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotus.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 4 x i8>,
+ i32,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vqdotus_mask_vx_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 1 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vqdotus.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vqdotus.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2,
+ <vscale x 1 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i8>,
+ i32,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vqdotus_mask_vx_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 2 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vqdotus.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vqdotus.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2,
+ <vscale x 2 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 16 x i8>,
+ i32,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vqdotus_mask_vx_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 4 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vqdotus.vx v8, v10, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vqdotus.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2,
+ <vscale x 4 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 32 x i8>,
+ i32,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vqdotus_mask_vx_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 8 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vqdotus.vx v8, v12, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vqdotus.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2,
+ <vscale x 8 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 64 x i8>,
+ i32,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vqdotus_mask_vx_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 16 x i1> %m, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT: vqdotus.vx v8, v16, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vqdotus.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2,
+ <vscale x 16 x i1> %m,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i32> %a
+}
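
Two details worth spelling out. First, vqdotus appears only in .vx form,
both above and in the TableGen patterns: a vv variant would be redundant,
since swapping the vector operands of vqdotsu.vv yields the same products,
which is what the *Swapped pattern classes rely on. Second, the i32 scalar
operand of the .vx forms carries four packed 8-bit elements; a small helper
sketches the packing, assuming element 0 occupies the least-significant
byte (the helper and the commented intrinsic name are illustrative, not
taken from this patch):

    #include <stdint.h>

    // Pack four 8-bit elements into the 32-bit scalar consumed by the
    // .vx forms; element 0 is assumed to land in the low byte.
    static inline uint32_t pack4_u8(uint8_t e0, uint8_t e1, uint8_t e2,
                                    uint8_t e3) {
      return (uint32_t)e0 | ((uint32_t)e1 << 8) |
             ((uint32_t)e2 << 16) | ((uint32_t)e3 << 24);
    }

    // Hypothetical use:
    //   acc = __riscv_vqdotus_vx_i32m1(acc, vs2,
    //                                  pack4_u8(e0, e1, e2, e3), vl);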