[clang] 28c3a74 - [Clang][RISCV] Improve diagnostic message for full multiply intrinsics



Author: eopXD
Date: 2023-07-18T19:39:46-07:00
New Revision: 28c3a74a5c6c7ed1ac97a010ca080eaacfd7f324

URL: https://github.com/llvm/llvm-project/commit/28c3a74a5c6c7ed1ac97a010ca080eaacfd7f324
DIFF: https://github.com/llvm/llvm-project/commit/28c3a74a5c6c7ed1ac97a010ca080eaacfd7f324.diff

LOG: [Clang][RISCV] Improve diagnostic message for full multiply intrinsics

The full multiply intrinsics (vmulh, vmulhu, vmulhsu, vsmul) are not included
for EEW=64 in Zve64*; they require the V extension to be enabled.

This commit improves the diagnostic message from

```
<source>:4:10: error: call to undeclared function '__riscv_vsmul_vv_i64m1';
    4 |   return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
```

to

```
test.c:5:10: error: builtin requires: v
    5 |   return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
```
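
For reference, a minimal reproducer, sketched from the new Sema test
(clang/test/Sema/riscv-vector-v-check.c); the test drives cc1 with
-target-feature +zve64x, while the driver invocation below is an assumption:

```
// Sketch of a reproducer, mirroring clang/test/Sema/riscv-vector-v-check.c.
// Build for riscv64 with Zve64x but without V, e.g. (flags are an assumption):
//   clang --target=riscv64 -march=rv64izve64x -fsyntax-only reproducer.c
#include <riscv_vector.h>

vint64m1_t reproducer(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  // EEW=64 fixed-point multiply: now diagnosed as "builtin requires: v".
  return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
```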

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D155416

Added: 
    clang/test/Sema/riscv-vector-v-check.c

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/include/clang/Support/RISCVVIntrinsicUtils.h
    clang/lib/Sema/SemaChecking.cpp
    clang/lib/Sema/SemaRISCVVectorLookup.cpp
    clang/utils/TableGen/RISCVVEmitter.cpp

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 49011d61af1a2a..7e5889812aecc8 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1713,13 +1713,11 @@ defm vmax : RVVSignedBinBuiltinSet;
 
 // 12.10. Vector Single-Width Integer Multiply Instructions
 defm vmul : RVVIntBinBuiltinSet;
-let RequiredFeatures = ["FullMultiply"] in {
 defm vmulh : RVVSignedBinBuiltinSet;
 defm vmulhu : RVVUnsignedBinBuiltinSet;
 defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
                                    [["vv", "v", "vvUv"],
                                     ["vx", "v", "vvUe"]]>;
-}
 
 // 12.11. Vector Integer Divide Instructions
 defm vdivu : RVVUnsignedBinBuiltinSet;
@@ -1859,9 +1857,7 @@ let ManualCodegen = [{
   defm vasub : RVVSignedBinBuiltinSetRoundingMode;
 
   // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
-  let RequiredFeatures = ["FullMultiply"] in {
   defm vsmul : RVVSignedBinBuiltinSetRoundingMode;
-  }
 
   // 13.4. Vector Single-Width Scaling Shift Instructions
   defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;

diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index e985a9bef888c4..804b1518c06b51 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -475,8 +475,7 @@ class RVVIntrinsic {
 enum RVVRequire : uint8_t {
   RVV_REQ_None = 0,
   RVV_REQ_RV64 = 1 << 0,
-  RVV_REQ_FullMultiply = 1 << 1,
-  RVV_REQ_Xsfvcp = 1 << 2,
+  RVV_REQ_Xsfvcp = 1 << 1,
 
   LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Xsfvcp)
 };

diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index a8bcf498d18da4..de0977e9bb3a20 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -4527,6 +4527,73 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
   if (FeatureMissing)
     return true;
 
+  // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
+  // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
+  switch (BuiltinID) {
+  default:
+    break;
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+  case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+  case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
+    bool RequireV = false;
+    for (unsigned ArgNum = 0; ArgNum < TheCall->getNumArgs(); ++ArgNum)
+      RequireV |= TheCall->getArg(ArgNum)->getType()->isRVVType(
+          /* Bitwidth */ 64, /* IsFloat */ false);
+
+    if (RequireV && !TI.hasFeature("v"))
+      return Diag(TheCall->getBeginLoc(),
+                  diag::err_riscv_builtin_requires_extension)
+             << /* IsExtension */ false << TheCall->getSourceRange() << "v";
+
+    break;
+  }
+  }
+
   switch (BuiltinID) {
   case RISCVVector::BI__builtin_rvv_vsetvli:
     return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||

diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
index fbed6e8d26cbc3..db2059e68b3d17 100644
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -202,7 +202,6 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
     ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
   const TargetInfo &TI = Context.getTargetInfo();
   bool HasRV64 = TI.hasFeature("64bit");
-  bool HasFullMultiply = TI.hasFeature("v");
   // Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
   // in RISCVVEmitter.cpp.
   for (auto &Record : Recs) {
@@ -256,12 +255,6 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
           !HasRV64)
         continue;
 
-      if ((BaseType == BasicType::Int64) &&
-          ((Record.RequiredExtensions & RVV_REQ_FullMultiply) ==
-           RVV_REQ_FullMultiply) &&
-          !HasFullMultiply)
-        continue;
-
       // Expanded with different LMUL.
       for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
         if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))

diff --git a/clang/test/Sema/riscv-vector-v-check.c b/clang/test/Sema/riscv-vector-v-check.c
new file mode 100644
index 00000000000000..8faa92c7b3cf12
--- /dev/null
+++ b/clang/test/Sema/riscv-vector-v-check.c
@@ -0,0 +1,197 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -disable-O0-optnone -o - -fsyntax-only %s -verify 
+// REQUIRES: riscv-registered-target
+#include <riscv_vector.h>
+
+vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return __riscv_vmulh_vv_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return __riscv_vmulh_vx_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhu_vv_u64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhu_vx_u64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m1(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m2(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m4(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m8(op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m1_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m2_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m4_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}
+vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m8_m(mask, op1, op2, vl); /* expected-error {{builtin requires: v}} */
+}

diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index a9349f1fdc1c75..2db48f3fa4af50 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -655,7 +655,6 @@ void RVVEmitter::createRVVIntrinsics(
     for (auto RequiredFeature : RequiredFeatures) {
       RVVRequire RequireExt = StringSwitch<RVVRequire>(RequiredFeature)
                                   .Case("RV64", RVV_REQ_RV64)
-                                  .Case("FullMultiply", RVV_REQ_FullMultiply)
                                   .Case("Xsfvcp", RVV_REQ_Xsfvcp)
                                   .Default(RVV_REQ_None);
       assert(RequireExt != RVV_REQ_None && "Unrecognized required feature?");

