[llvm] b784719 - [RISCV] Add the passthru operand for RVV nomask binary intrinsics.
Zakk Chen via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 15 18:40:04 PST 2022
Author: Zakk Chen
Date: 2022-02-15T18:36:18-08:00
New Revision: b7847199044e46e0a138586e6f525ef071b3e1f8
URL: https://github.com/llvm/llvm-project/commit/b7847199044e46e0a138586e6f525ef071b3e1f8
DIFF: https://github.com/llvm/llvm-project/commit/b7847199044e46e0a138586e6f525ef071b3e1f8.diff
LOG: [RISCV] Add the passthru operand for RVV nomask binary intrinsics.
The goal is to support tail and mask policy in the RVV builtins.
We focus on the IR part first.
If the passthru operand is undef, we use tail agnostic; otherwise we
use tail undisturbed.
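
For illustration, a minimal IR sketch of the new nomask form (the
intrinsic signature follows the updated tests below; the function and
operand names are illustrative only):

  declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(
    <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64)

  define <vscale x 8 x i8> @vadd_ta(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %op2, i64 %vl) {
    ; undef passthru -> lowered with tail agnostic
    %r = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(
           <vscale x 8 x i8> undef, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %op2, i64 %vl)
    ret <vscale x 8 x i8> %r
  }

  define <vscale x 8 x i8> @vadd_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %op2, i64 %vl) {
    ; non-undef passthru -> lowered with tail undisturbed
    %r = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(
           <vscale x 8 x i8> %merge, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %op2, i64 %vl)
    ret <vscale x 8 x i8> %r
  }
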
Add a passthru operand to VSLIDE1UP_VL and VSLIDE1DOWN_VL to support
the i64 scalar case on rv32.
Until InsertVSETVLI supports mask agnostic, the masked VSLIDE1 forms
only emit the mask-undisturbed policy, even when mask agnostic is
requested.
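
As a hedged sketch of the slide case (the exact intrinsic mangling and
operand order here are assumptions; see the updated vslide1down-rv32.ll
and the new masked-vslide1down-rv32.ll tests for the authoritative
forms), the nomask vslide1down on rv32 keeps the i64 slide-in scalar
while vl is XLEN (i32), and now also takes the passthru in the first
position:

  ; assumed mangling, for illustration only
  declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64>, <vscale x 1 x i64>, i64, i32)

  define <vscale x 1 x i64> @slide1down_ta(<vscale x 1 x i64> %src, i64 %s, i32 %vl) {
    ; undef passthru -> tail agnostic; a non-undef passthru requests tail undisturbed
    %r = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
           <vscale x 1 x i64> undef, <vscale x 1 x i64> %src, i64 %s, i32 %vl)
    ret <vscale x 1 x i64> %r
  }
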
Reviewed by: craig.topper, rogfer01
Differential Revision: https://reviews.llvm.org/D117989
Added:
llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
Modified:
clang/include/clang/Basic/riscv_vector.td
clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
clang/utils/TableGen/RISCVVEmitter.cpp
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.h
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfadd.ll
llvm/test/CodeGen/RISCV/rvv/vfdiv.ll
llvm/test/CodeGen/RISCV/rvv/vfmax.ll
llvm/test/CodeGen/RISCV/rvv/vfmin.ll
llvm/test/CodeGen/RISCV/rvv/vfmul.ll
llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
llvm/test/CodeGen/RISCV/rvv/vfsub.ll
llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
Removed:
################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index d02a4fffb1411..c1d21d6e7aa35 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -173,20 +173,13 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
// This builtin has a granted vector length parameter.
bit HasVL = true;
- // There are several cases for specifying tail policy.
- // 1. Add tail policy argument to masked intrinsics. It may have the maskedoff
- // argument or not.
- // * Have the maskedoff argument: (HasPolicy = true, HasMaskedOffOperand = true)
- // Ex: vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, ta);
- // * Do not have the maskedoff argument: (HasPolicy = true, HasMaskedOffOperand = false)
- // Ex: vmacc_vv_i8m1_mt(mask, vd, vs1, vs2, vl, ta);
- // 2. Add dest argument for no mask intrinsics. (TODO)
- // Ex: vmv_v_x_i8m1_t(dest, src, vl);
- // 3. Always tail agnostic. (HasPolicy = false)
- // Ex: vmseq_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
- // The tail policy argument is located at the last position.
+ // The masked intrinsic IR has a policy operand.
+ // The policy argument is located at the last position.
bit HasPolicy = true;
+ // The nomask intrinsic IR has a passthru operand.
+ bit HasNoMaskPassThru = false;
+
// This builtin supports non-masked function overloading api.
// All masked operations support overloading api.
bit HasNoMaskedOverloaded = true;
@@ -1290,6 +1283,8 @@ multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
cast<llvm::VectorType>(ResultType)->getElementType(),
Ops[1]->getType()};
Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[1]));
+ // insert undef passthru
+ Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
break;
}
}],
@@ -1321,6 +1316,8 @@ multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
Ops[1]->getType()};
Ops.insert(Ops.begin() + 1,
llvm::Constant::getAllOnesValue(IntrinsicTypes[1]));
+ // insert undef passthru
+ Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
break;
}
}],
@@ -1369,6 +1366,8 @@ multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
IntrinsicTypes = {ResultType,
Ops[0]->getType(), Ops[1]->getType()};
Ops.insert(Ops.begin() + 1, Ops[0]);
+ // insert undef passthru
+ Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
break;
}
}],
@@ -1402,6 +1401,8 @@ multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
cast<llvm::VectorType>(Ops[0]->getType())->getElementType(),
Ops[1]->getType()};
Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+ // insert undef passthru
+ Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
break;
}
}],
@@ -1438,6 +1439,8 @@ multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
Ops[1]->getType(),
Ops[1]->getType()};
Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+ // insert undef passthru
+ Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
break;
}
}],
@@ -1583,15 +1586,18 @@ defm : RVVIndexedSegStore<"vsoxseg">;
// 12. Vector Integer Arithmetic Instructions
// 12.1. Vector Single-Width Integer Add and Subtract
+let HasNoMaskPassThru = true in {
defm vadd : RVVIntBinBuiltinSet;
defm vsub : RVVIntBinBuiltinSet;
defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
[["vx", "v", "vve"],
["vx", "Uv", "UvUvUe"]]>;
+}
defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;
// 12.2. Vector Widening Integer Add/Subtract
// Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
+let HasNoMaskPassThru = true in {
defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
// Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
@@ -1603,6 +1609,7 @@ defm vwsubu : RVVUnsignedWidenOp0BinBuiltinSet;
// Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW
defm vwadd : RVVSignedWidenOp0BinBuiltinSet;
defm vwsub : RVVSignedWidenOp0BinBuiltinSet;
+}
defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi",
[["Uw", "UwUv"]]>;
defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
@@ -1633,12 +1640,15 @@ let HasMask = false, HasPolicy = false in {
}
// 12.5. Vector Bitwise Logical Instructions
+let HasNoMaskPassThru = true in {
defm vand : RVVIntBinBuiltinSet;
defm vxor : RVVIntBinBuiltinSet;
defm vor : RVVIntBinBuiltinSet;
+}
defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;
// 12.6. Vector Single-Width Bit Shift Instructions
+let HasNoMaskPassThru = true in {
defm vsll : RVVShiftBuiltinSet;
defm vsrl : RVVUnsignedShiftBuiltinSet;
defm vsra : RVVSignedShiftBuiltinSet;
@@ -1646,6 +1656,7 @@ defm vsra : RVVSignedShiftBuiltinSet;
// 12.7. Vector Narrowing Integer Right Shift Instructions
defm vnsrl : RVVUnsignedNShiftBuiltinSet;
defm vnsra : RVVSignedNShiftBuiltinSet;
+}
defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
[["v", "vw"],
["Uv", "UvUw"]]>;
@@ -1665,6 +1676,7 @@ defm vmsge : RVVSignedMaskOutBuiltinSet;
}
// 12.9. Vector Integer Min/Max Instructions
+let HasNoMaskPassThru = true in {
defm vminu : RVVUnsignedBinBuiltinSet;
defm vmin : RVVSignedBinBuiltinSet;
defm vmaxu : RVVUnsignedBinBuiltinSet;
@@ -1685,9 +1697,10 @@ defm vdivu : RVVUnsignedBinBuiltinSet;
defm vdiv : RVVSignedBinBuiltinSet;
defm vremu : RVVUnsignedBinBuiltinSet;
defm vrem : RVVSignedBinBuiltinSet;
+}
// 12.12. Vector Widening Integer Multiply Instructions
-let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+let Log2LMUL = [-3, -2, -1, 0, 1, 2], HasNoMaskPassThru = true in {
defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi",
[["vv", "w", "wvv"],
["vx", "w", "wve"]]>;
@@ -1751,6 +1764,7 @@ let HasMask = false, HasPolicy = false in {
// 13. Vector Fixed-Point Arithmetic Instructions
// 13.1. Vector Single-Width Saturating Add and Subtract
+let HasNoMaskPassThru = true in {
defm vsaddu : RVVUnsignedBinBuiltinSet;
defm vsadd : RVVSignedBinBuiltinSet;
defm vssubu : RVVUnsignedBinBuiltinSet;
@@ -1800,6 +1814,7 @@ let Log2LMUL = [-2, -1, 0, 1, 2] in {
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
}
+}
// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
defm vfmacc : RVVFloatingTerBuiltinSet;
@@ -1827,6 +1842,7 @@ def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
def vfrec7 : RVVFloatingUnaryVVBuiltin;
// 14.11. Vector Floating-Point MIN/MAX Instructions
+let HasNoMaskPassThru = true in {
defm vfmin : RVVFloatingBinBuiltinSet;
defm vfmax : RVVFloatingBinBuiltinSet;
@@ -1834,6 +1850,7 @@ defm vfmax : RVVFloatingBinBuiltinSet;
defm vfsgnj : RVVFloatingBinBuiltinSet;
defm vfsgnjn : RVVFloatingBinBuiltinSet;
defm vfsgnjx : RVVFloatingBinBuiltinSet;
+}
defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
@@ -2005,6 +2022,7 @@ defm vslideup : RVVSlideBuiltinSet;
defm vslidedown : RVVSlideBuiltinSet;
// 17.3.3. Vector Slide1up Instructions
+let HasNoMaskPassThru = true in {
defm vslide1up : RVVSlideOneBuiltinSet;
defm vfslide1up : RVVFloatingBinVFBuiltinSet;
@@ -2027,6 +2045,7 @@ defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csil",
[["vx", "Uv", "UvUvz"]]>;
defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
[["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
+}
// 17.5. Vector Compress Instruction
let HasMask = false, HasPolicy = false,
diff --git a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
index dab41f1695e2e..e8445d40766f0 100644
--- a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
+++ b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
@@ -25,7 +25,7 @@ vint8m1_t vadd_generic (vint8m1_t op0, vint8m1_t op1, size_t op2);
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>* [[OP0_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>* [[OP1_ADDR]], align 1
// CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[VL_ADDR]], align 8
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[TMP0]], <vscale x 8 x i8> [[TMP1]], i64 [[TMP2]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[TMP0]], <vscale x 8 x i8> [[TMP1]], i64 [[TMP2]])
// CHECK-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[RET]], align 1
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>* [[RET]], align 1
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP4]]
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
index 9b6b966723e39..8e7c4a76e8c4d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
index e3ff19a319483..bc05aa00acc89 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -43,7 +43,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -52,7 +52,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -61,7 +61,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -70,7 +70,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -79,7 +79,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -88,7 +88,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -97,7 +97,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -106,7 +106,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -115,7 +115,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -124,7 +124,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -133,7 +133,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -142,7 +142,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -160,7 +160,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -169,7 +169,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -178,7 +178,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -187,7 +187,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -196,7 +196,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -205,7 +205,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -214,7 +214,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -223,7 +223,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -232,7 +232,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -241,7 +241,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -250,7 +250,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -259,7 +259,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -268,7 +268,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -277,7 +277,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -286,7 +286,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -295,7 +295,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -304,7 +304,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -313,7 +313,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -322,7 +322,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -331,7 +331,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -340,7 +340,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -349,7 +349,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -358,7 +358,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -367,7 +367,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -376,7 +376,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -385,7 +385,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -403,7 +403,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -412,7 +412,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -421,7 +421,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -430,7 +430,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -439,7 +439,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -457,7 +457,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -466,7 +466,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -475,7 +475,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -484,7 +484,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -493,7 +493,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -502,7 +502,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -511,7 +511,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -520,7 +520,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -529,7 +529,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -538,7 +538,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -556,7 +556,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -574,7 +574,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -583,7 +583,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -592,7 +592,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -601,7 +601,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -610,7 +610,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -619,7 +619,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -628,7 +628,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -637,7 +637,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -646,7 +646,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -655,7 +655,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -664,7 +664,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -673,7 +673,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -682,7 +682,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -691,7 +691,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -700,7 +700,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -709,7 +709,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -718,7 +718,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -727,7 +727,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -736,7 +736,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -745,7 +745,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -754,7 +754,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -763,7 +763,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -772,7 +772,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -781,7 +781,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -790,7 +790,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
index 909da393f7665..9bddb61f8cf3e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
index 8def45998e1b8..98570c85b4150 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
index e5f8d3f645c04..e0e804dc6baff 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
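Every nomask hunk in this file follows the same shape: the checked intrinsic gains one leading vector operand of the result type, and the nomask builtin passes undef there, which keeps tail agnostic as before. As a rough sketch only (the test bodies are elided by the diff context, and the call spelling below is assumed from the overloaded naming these tests use, not quoted from the hunks), a vector-scalar case now pairs like this:

    vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
      return vdivu(op1, op2, vl);  // assumed body; overloaded nomask form
    }
    // Expected IR shape after this change:
    //   call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(
    //       <vscale x 8 x i8> undef,            ; new passthru, undef => tail agnostic
    //       <vscale x 8 x i8> %op1, i8 %op2, i64 %vl)
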
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
index f784db297373a..7d098fa95e125 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
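The vfabs hunks look slightly different only because there is no dedicated vfabs intrinsic: the builtin is emitted as vfsgnjx with the source repeated as both inputs, so these checks just prepend the same undef passthru. A minimal sketch, with the body assumed in the overloaded spelling used by these tests:

    vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t op1, size_t vl) {
      return vfabs(op1, vl);  // assumed body; lowers to vfsgnjx(op1, op1)
    }
    // Checked IR shape after this change:
    //   call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(
    //       <vscale x 2 x float> undef,          ; new passthru operand
    //       <vscale x 2 x float> %op1, <vscale x 2 x float> %op1, i64 %vl)
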
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
index 0d7c709183905..455c0dcb93479 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -89,7 +89,7 @@ vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -98,7 +98,7 @@ vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -107,7 +107,7 @@ vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -116,7 +116,7 @@ vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -125,7 +125,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
@@ -134,7 +134,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -143,7 +143,7 @@ vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
@@ -152,7 +152,7 @@ vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -161,7 +161,7 @@ vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
@@ -170,7 +170,7 @@ vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -233,7 +233,7 @@ vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
@@ -242,7 +242,7 @@ vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -251,7 +251,7 @@ vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -269,7 +269,7 @@ vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
index f8c882ee23f31..05e3acb4cf3cf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
index 1ae5f289ca667..c8f874db1b2a2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
index 560d03030b8ba..2328aafb91311 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
index 6051fcf8dceec..c5f9992f2baf5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
index b2f4fb383db44..c688b4c9ae1bc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
index 8c19f7bd147f9..ed789ae936926 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
index e407104b992a2..e975dae29dd15 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
index e5fa14a92604b..a1e2c0c24a24a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -178,7 +178,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -188,7 +188,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -207,7 +207,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -216,7 +216,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -226,7 +226,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -235,7 +235,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -245,7 +245,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -254,7 +254,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -264,7 +264,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -273,7 +273,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -292,7 +292,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -302,7 +302,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -311,7 +311,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -321,7 +321,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -330,7 +330,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -340,7 +340,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -349,7 +349,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -359,7 +359,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -378,7 +378,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -387,7 +387,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -397,7 +397,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -406,7 +406,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -416,7 +416,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -425,7 +425,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -435,7 +435,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -444,7 +444,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -454,7 +454,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -463,7 +463,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -473,7 +473,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -482,7 +482,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -492,7 +492,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -501,7 +501,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -511,7 +511,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
index e70c63f05770a..550c4e2085fd9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
@@ -27,7 +27,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
@@ -37,7 +37,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
@@ -47,7 +47,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
@@ -57,7 +57,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
@@ -67,7 +67,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
@@ -77,7 +77,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
@@ -87,7 +87,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
index 989fd283b920f..794ecc7a52830 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
@@ -27,7 +27,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
@@ -37,7 +37,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
@@ -47,7 +47,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
@@ -57,7 +57,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
@@ -67,7 +67,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
@@ -77,7 +77,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
@@ -87,7 +87,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
index 2df9853fdfe54..5cf294713fdc3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
index 2a5bf9f06f114..73d50704347e4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -36,7 +36,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -55,7 +55,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -74,7 +74,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -93,7 +93,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -112,7 +112,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -131,7 +131,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
index 8d5396c32933e..fe0eebfd952ab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
index 2dd4350fa9c7a..4d712614bc649 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -36,7 +36,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -55,7 +55,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -74,7 +74,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -93,7 +93,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -112,7 +112,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -131,7 +131,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
index ee0948836af2a..7d836785e57f1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
index 5c269c17f1ea5..0c6bf71d57fc7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
index a69e943a6a2bc..722699306bc74 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
@@ -9,7 +9,7 @@
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -18,7 +18,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -27,7 +27,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -36,7 +36,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -45,7 +45,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -54,7 +54,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -63,7 +63,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -72,7 +72,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -81,7 +81,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -90,7 +90,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -99,7 +99,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -108,7 +108,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -117,7 +117,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -126,7 +126,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -135,7 +135,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -144,7 +144,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -153,7 +153,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -162,7 +162,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
@@ -171,7 +171,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -180,7 +180,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
@@ -207,7 +207,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -216,7 +216,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
index b08b304c7ef5e..a93c1d919ec8b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1590,7 +1590,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1599,7 +1599,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1608,7 +1608,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1662,7 +1662,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1671,7 +1671,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1680,7 +1680,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1689,7 +1689,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1698,7 +1698,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1716,7 +1716,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1725,7 +1725,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
index 29ccbe114c8b6..bd2af8f9d2ed9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
@@ -124,7 +124,7 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -133,7 +133,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
@@ -143,7 +143,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
@@ -152,7 +152,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -161,7 +161,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
@@ -216,7 +216,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
@@ -225,7 +225,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -234,7 +234,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
@@ -243,7 +243,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -252,7 +252,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
@@ -289,7 +289,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -298,7 +298,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
@@ -308,7 +308,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -317,7 +317,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
@@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
@@ -400,7 +400,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -409,7 +409,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
@@ -419,7 +419,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -428,7 +428,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
@@ -438,7 +438,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
@@ -457,7 +457,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -466,7 +466,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
@@ -476,7 +476,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -485,7 +485,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
@@ -495,7 +495,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -504,7 +504,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
@@ -514,7 +514,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -523,7 +523,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
@@ -533,7 +533,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -542,7 +542,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
@@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
index d25b1b4c414a3..104a5746c1e42 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
@@ -69,7 +69,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
@@ -78,7 +78,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
@@ -87,7 +87,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
@@ -123,7 +123,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
@@ -132,7 +132,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
@@ -141,7 +141,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
@@ -168,7 +168,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
@@ -177,7 +177,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
@@ -213,7 +213,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
@@ -222,7 +222,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
@@ -231,7 +231,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
@@ -240,7 +240,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
@@ -249,7 +249,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
@@ -258,7 +258,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) {
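[Editor's note, illustrative only] The vncvt.c hunks above show that the narrowing-convert builtin is emitted as @llvm.riscv.vnsrl.* with a constant shift of 0, and that the unmasked form now carries a leading undef passthru vector (tail agnostic). A minimal C sketch of the source-level call that produces such IR, assuming the overloaded spelling vncvt_x used by these tests and a toolchain built for RV64 with the V extension enabled:

  #include <riscv_vector.h>

  /* Sketch: narrow a 16-bit vector to 8 bits with the overloaded builtin.
   * The unmasked call lowers to @llvm.riscv.vnsrl.* with shift 0; after this
   * patch the intrinsic also takes an undef passthru as its first operand. */
  vint8mf8_t narrow(vint16mf4_t src, size_t vl) {
    return vncvt_x(src, vl);
  }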
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
index a52e5677e01fc..d6d3ea9188a31 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
@@ -88,7 +88,7 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
@@ -97,7 +97,7 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
@@ -106,7 +106,7 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
@@ -115,7 +115,7 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
@@ -124,7 +124,7 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@@ -133,7 +133,7 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
@@ -142,7 +142,7 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
@@ -151,7 +151,7 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
@@ -169,7 +169,7 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
@@ -178,7 +178,7 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
@@ -196,7 +196,7 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
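[Editor's note, illustrative only] As the vneg.c hunks show, vneg has no dedicated instruction; the builtin is expanded to @llvm.riscv.vrsub.* computing "0 - op1", and the unmasked call now passes an undef passthru first. A minimal sketch, assuming the overloaded spelling vneg from these tests:

  #include <riscv_vector.h>

  /* Sketch: elementwise negation via the overloaded builtin. Lowers to
   * @llvm.riscv.vrsub.* with scalar 0 and, after this patch, an undef
   * passthru operand, allowing tail-agnostic policy selection. */
  vint32m1_t negate(vint32m1_t op1, size_t vl) {
    return vneg(op1, vl);
  }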
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
index 642055f09fab4..696d892164919 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
@@ -88,7 +88,7 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
@@ -97,7 +97,7 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
@@ -106,7 +106,7 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
@@ -115,7 +115,7 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
@@ -124,7 +124,7 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@@ -133,7 +133,7 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
@@ -142,7 +142,7 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
@@ -151,7 +151,7 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
@@ -169,7 +169,7 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
@@ -178,7 +178,7 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
@@ -196,7 +196,7 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
@@ -205,7 +205,7 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
@@ -214,7 +214,7 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
@@ -223,7 +223,7 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
@@ -232,7 +232,7 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
@@ -241,7 +241,7 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
@@ -259,7 +259,7 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
@@ -268,7 +268,7 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
@@ -277,7 +277,7 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
@@ -286,7 +286,7 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
@@ -295,7 +295,7 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
@@ -304,7 +304,7 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
@@ -313,7 +313,7 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
@@ -322,7 +322,7 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
@@ -331,7 +331,7 @@ vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
@@ -340,7 +340,7 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
@@ -349,7 +349,7 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
@@ -358,7 +358,7 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
@@ -367,7 +367,7 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {
@@ -376,7 +376,7 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {
@@ -385,7 +385,7 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {
@@ -394,7 +394,7 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) {
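
The updated CHECK lines above all follow one pattern: each unmasked intrinsic call gains a leading operand of the result vector type, and these tests pass undef for it. A minimal hand-written IR sketch of the updated vxor form used by vnot, with the declare and operand values copied from the new CHECK line (the function name @vnot_sketch is made up for illustration):

  declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, i16, i64)

  define <vscale x 32 x i16> @vnot_sketch(<vscale x 32 x i16> %op1, i64 %vl) {
    ; leading passthru operand is undef here, matching the updated unmasked tests
    %r = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> %op1, i16 -1, i64 %vl)
    ret <vscale x 32 x i16> %r
  }
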
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
index c911a400be40a..f580d8ccb4dba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
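
For the narrowing shifts the new leading operand has the narrow destination type rather than the wide source type, as the vnsra CHECK lines above show. A small illustrative IR sketch with the signature taken from the updated nxv1i8 test (the function name is made up):

  declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8>, <vscale x 1 x i16>, <vscale x 1 x i8>, i64)

  define <vscale x 1 x i8> @vnsra_sketch(<vscale x 1 x i16> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
    ; passthru is a narrow nxv1i8 vector, while %op1 keeps the wide nxv1i16 type
    %r = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> %op1, <vscale x 1 x i8> %shift, i64 %vl)
    ret <vscale x 1 x i8> %r
  }
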
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
index 315a6802b8ba0..7a1b4618fe2f8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
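
The vector-scalar (wx) forms pick up the same leading operand; only the scalar shift amount differs from the wv forms. An illustrative IR sketch with the types copied from the updated nxv32i8 vnsrl test (the function name is made up):

  declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8>, <vscale x 32 x i16>, i64, i64)

  define <vscale x 32 x i8> @vnsrl_wx_sketch(<vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
    ; same operand layout as the wv form: narrow passthru first, then the wide source, the scalar shift, and vl
    %r = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> %op1, i64 %shift, i64 %vl)
    ret <vscale x 32 x i8> %r
  }
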
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
index 526f78ad58630..44349ce19d20f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
index ddff28ea9d6c1..e8041d4d1a9bc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
index 7e4ef5e0ba950..1a2ddb813927f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
@@ -17,7 +17,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
@@ -26,7 +26,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
@@ -36,7 +36,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
@@ -45,7 +45,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
@@ -55,7 +55,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
@@ -64,7 +64,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -73,7 +73,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
@@ -82,7 +82,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -91,7 +91,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
@@ -100,7 +100,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -109,7 +109,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
@@ -118,7 +118,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -127,7 +127,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
@@ -136,7 +136,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
@@ -146,7 +146,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
@@ -155,7 +155,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
@@ -165,7 +165,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
@@ -174,7 +174,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
@@ -184,7 +184,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
@@ -193,7 +193,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
@@ -203,7 +203,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
@@ -212,7 +212,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
@@ -222,7 +222,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
@@ -231,7 +231,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
@@ -241,7 +241,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
@@ -250,7 +250,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
@@ -260,7 +260,7 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
@@ -269,7 +269,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
@@ -279,7 +279,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
@@ -288,7 +288,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
@@ -298,7 +298,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
@@ -307,7 +307,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
@@ -317,7 +317,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
@@ -326,7 +326,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
@@ -336,7 +336,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
@@ -345,7 +345,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
@@ -355,7 +355,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
@@ -364,7 +364,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
@@ -374,7 +374,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
@@ -383,7 +383,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
@@ -393,7 +393,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
@@ -402,7 +402,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
@@ -412,7 +412,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
@@ -421,7 +421,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
@@ -431,7 +431,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
@@ -440,7 +440,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
@@ -450,7 +450,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
@@ -459,7 +459,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
@@ -469,7 +469,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
@@ -478,7 +478,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -487,7 +487,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
@@ -496,7 +496,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -505,7 +505,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
@@ -514,7 +514,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -523,7 +523,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
@@ -532,7 +532,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -541,7 +541,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
@@ -550,7 +550,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
@@ -560,7 +560,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
@@ -570,7 +570,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
@@ -580,7 +580,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
@@ -590,7 +590,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
@@ -600,7 +600,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
@@ -619,7 +619,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
@@ -628,7 +628,7 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
@@ -638,7 +638,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
@@ -647,7 +647,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
@@ -657,7 +657,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
@@ -666,7 +666,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
@@ -676,7 +676,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
@@ -686,7 +686,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
@@ -696,7 +696,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
@@ -705,7 +705,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
@@ -715,7 +715,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
@@ -724,7 +724,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
@@ -734,7 +734,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
@@ -743,7 +743,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
@@ -753,7 +753,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
@@ -762,7 +762,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
@@ -772,7 +772,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
@@ -781,7 +781,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
@@ -791,7 +791,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
@@ -800,7 +800,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
@@ -810,7 +810,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
@@ -819,7 +819,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
@@ -829,7 +829,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
@@ -838,7 +838,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
@@ -848,7 +848,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
@@ -858,7 +858,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
@@ -868,7 +868,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
@@ -877,7 +877,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
@@ -887,7 +887,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
@@ -896,7 +896,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
@@ -906,7 +906,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
@@ -915,7 +915,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
@@ -925,7 +925,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
@@ -934,7 +934,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
@@ -944,7 +944,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
@@ -953,7 +953,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
@@ -963,7 +963,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
@@ -972,7 +972,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
@@ -982,7 +982,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
@@ -991,7 +991,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
@@ -1001,7 +1001,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
@@ -1010,7 +1010,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
@@ -1020,7 +1020,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
@@ -1030,7 +1030,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
@@ -1040,7 +1040,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
@@ -1077,7 +1077,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
@@ -1087,7 +1087,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
@@ -1097,7 +1097,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
@@ -1107,7 +1107,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
@@ -1117,7 +1117,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
@@ -1127,7 +1127,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
@@ -1137,7 +1137,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
@@ -1147,7 +1147,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
@@ -1157,7 +1157,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
@@ -1167,7 +1167,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
@@ -1177,7 +1177,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
@@ -1187,7 +1187,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
@@ -1197,7 +1197,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
@@ -1207,7 +1207,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
@@ -1217,7 +1217,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
@@ -1227,7 +1227,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
@@ -1247,7 +1247,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
@@ -1267,7 +1267,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
@@ -1277,7 +1277,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -1287,7 +1287,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -1297,7 +1297,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -1307,7 +1307,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -1317,7 +1317,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -1327,7 +1327,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -1337,7 +1337,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -1347,7 +1347,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
@@ -1357,7 +1357,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
@@ -1367,7 +1367,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
@@ -1377,7 +1377,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
@@ -1387,7 +1387,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
@@ -1397,7 +1397,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
@@ -1407,7 +1407,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
@@ -1417,7 +1417,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
@@ -1427,7 +1427,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
@@ -1437,7 +1437,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
@@ -1447,7 +1447,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
@@ -1457,7 +1457,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
@@ -1467,7 +1467,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
@@ -1477,7 +1477,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
@@ -1487,7 +1487,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
@@ -1497,7 +1497,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
@@ -1507,7 +1507,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2,
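A minimal source-level sketch of the kind of call the updated CHECK lines above exercise, assuming the overloaded vrgather(...) spelling used throughout these tests (the helper name gather_f64m1 below is hypothetical, not taken from the test files): the only difference in the generated IR after this patch is the extra leading undef passthru operand on the intrinsic call.

  #include <riscv_vector.h>

  // Nomask overloaded builtin: the emitted intrinsic call now carries an
  // undef passthru vector as its first operand, e.g.
  //   call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(
  //       <vscale x 1 x double> undef, <vscale x 1 x double> %op1,
  //       <vscale x 1 x i64> %index, i64 %vl)
  vfloat64m1_t gather_f64m1(vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
    return vrgather(op1, index, vl);
  }
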
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
index d313ab0d0e0dc..e5d8565ce1ab3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
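
Every hunk above follows the same mechanical pattern: the nomask call to the
@llvm.riscv.* intrinsic gains a leading passthru operand of the result vector
type, written as undef for these plain (non-masked) builtins. As a hand-written
sketch that is not part of the patch (the function name @sketch and the
standalone declare are illustrative only), the updated IR-level shape for one
of the vsadd cases checked in the next file would be:

  ; sketch of the nomask intrinsic with the new leading passthru operand
  declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(
      <vscale x 1 x i8>,   ; passthru (undef in the nomask builtins above)
      <vscale x 1 x i8>,   ; op1
      <vscale x 1 x i8>,   ; op2
      i64)                 ; vl

  define <vscale x 1 x i8> @sketch(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i64 %vl) {
    %r = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(
        <vscale x 1 x i8> undef, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i64 %vl)
    ret <vscale x 1 x i8> %r
  }

Passing undef as the first operand is exactly the form the regenerated CHECK
lines expect; passing a real vector there instead would tie the destination to
that operand rather than leaving it unconstrained.
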
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
index a5418573ac47c..627b5c96cb10c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
index 0e200dc4cb546..b935758780a9c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
@@ -135,7 +135,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
@@ -144,7 +144,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
@@ -153,7 +153,7 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
@@ -162,7 +162,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
@@ -171,7 +171,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
@@ -180,7 +180,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
@@ -207,7 +207,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
@@ -217,7 +217,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
@@ -227,7 +227,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
@@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
@@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
@@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
@@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
@@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
@@ -283,7 +283,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
@@ -293,7 +293,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
@@ -303,7 +303,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
@@ -313,7 +313,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
@@ -323,7 +323,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
@@ -333,7 +333,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
@@ -343,7 +343,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
@@ -353,7 +353,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
@@ -363,7 +363,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -373,7 +373,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -383,7 +383,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -393,7 +393,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -403,7 +403,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -413,7 +413,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
index 7d8056e097bbf..bbd6d6554b923 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
@@ -135,7 +135,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
@@ -144,7 +144,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
@@ -153,7 +153,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
@@ -162,7 +162,7 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
@@ -171,7 +171,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
@@ -180,7 +180,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
@@ -207,7 +207,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
@@ -216,7 +216,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
@@ -225,7 +225,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
@@ -234,7 +234,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
@@ -243,7 +243,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
@@ -252,7 +252,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
@@ -261,7 +261,7 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
@@ -270,7 +270,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
@@ -280,7 +280,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
@@ -290,7 +290,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
@@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
@@ -310,7 +310,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
@@ -320,7 +320,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
@@ -330,7 +330,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
@@ -340,7 +340,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
@@ -350,7 +350,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
@@ -360,7 +360,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -370,7 +370,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -380,7 +380,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -390,7 +390,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -400,7 +400,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -410,7 +410,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
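For reference, a minimal stand-alone IR sketch (illustration only, not part of the patch) of the call form the updated vslide1up checks above expect, using the nxv1i64 variant as an example. The leading <vscale x 1 x i64> operand is the new passthru, and these unmasked overloaded tests pass undef for it; the function name @slide1up_example is hypothetical:

; Hypothetical module distilled from the FileCheck lines above.
declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64)

define <vscale x 1 x i64> @slide1up_example(<vscale x 1 x i64> %src, i64 %value, i64 %vl) {
entry:
  ; Passthru is the first operand; these tests emit undef for it.
  %0 = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %src, i64 %value, i64 %vl)
  ret <vscale x 1 x i64> %0
}
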
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
index 5101560b0e4e0..d1193186e04ce 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
index dc05b51b2a6aa..f67a014f5b3df 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -26,7 +26,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -44,7 +44,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -62,7 +62,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
index 3905826cb0d80..7a0374cf0f273 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
index c7855bb0d02d0..6455f3804bc8e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
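All of the hunks in these overloaded tests change in the same way: the nomask intrinsic call gains a new leading operand of the result vector type, and the non-policy builtins pass undef for it. As a rough sketch only (not part of this patch; the function name below is made up for illustration, and the call form follows the overloaded tests in these files), a test written in the same style would now check for the extra undef operand like this:

#include <riscv_vector.h>

// CHECK-RV64-LABEL: @example_vsra_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t example_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
  // Overloaded nomask form; the passthru operand in the emitted IR is undef.
  return vsra(op1, shift, vl);
}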
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
index bf0e538fc331b..968377dc5ffd5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
index 2427f374cdb39..7ce1fe3917b63 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
index d393fa1214fc5..d723aa470c6ae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
index 04b91eb785da2..e547e43221a1d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
index 720ddadf5123b..7cb60680fe769 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
index 7d66d0940473e..73d9fd62c19a1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -556,7 +556,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
@@ -575,7 +575,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -594,7 +594,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -603,7 +603,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
@@ -613,7 +613,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
@@ -622,7 +622,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -631,7 +631,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
@@ -649,7 +649,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
@@ -658,7 +658,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -676,7 +676,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
@@ -685,7 +685,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
@@ -694,7 +694,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -703,7 +703,7 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -712,7 +712,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
@@ -721,7 +721,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
@@ -730,7 +730,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -739,7 +739,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -748,7 +748,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
@@ -757,7 +757,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
@@ -766,7 +766,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -776,7 +776,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -785,7 +785,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -814,7 +814,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -823,7 +823,7 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
@@ -832,7 +832,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -850,7 +850,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
@@ -868,7 +868,7 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
@@ -877,7 +877,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -886,7 +886,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -895,7 +895,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
@@ -904,7 +904,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
@@ -913,7 +913,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -922,7 +922,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -931,7 +931,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
@@ -949,7 +949,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -959,7 +959,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -968,7 +968,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -986,7 +986,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -995,7 +995,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1004,7 +1004,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
@@ -1013,7 +1013,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
@@ -1022,7 +1022,7 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1031,7 +1031,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1076,7 +1076,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,7 +1085,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
index 172ea75b07b42..f431ea506d28c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
@@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
@@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
@@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
@@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
@@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
@@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
@@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
@@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
@@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
@@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
@@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
@@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
@@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
@@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
@@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
@@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
@@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
@@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
@@ -195,7 +195,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
@@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
@@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
@@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
@@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
@@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
@@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
@@ -258,7 +258,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
@@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
index 3e6eaaa02da9f..09e6e487cee7e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
index 9080a62a47bad..d355f29105463 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -556,7 +556,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
@@ -575,7 +575,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -594,7 +594,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -603,7 +603,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
@@ -613,7 +613,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
@@ -622,7 +622,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -631,7 +631,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
@@ -649,7 +649,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
@@ -658,7 +658,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -676,7 +676,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
@@ -685,7 +685,7 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
@@ -694,7 +694,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -703,7 +703,7 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -712,7 +712,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
@@ -721,7 +721,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
@@ -730,7 +730,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -739,7 +739,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -748,7 +748,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
@@ -757,7 +757,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
@@ -766,7 +766,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -776,7 +776,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -785,7 +785,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -814,7 +814,7 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -823,7 +823,7 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
@@ -832,7 +832,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -850,7 +850,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
@@ -868,7 +868,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
@@ -877,7 +877,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -886,7 +886,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -895,7 +895,7 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
@@ -904,7 +904,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
@@ -913,7 +913,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -922,7 +922,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -931,7 +931,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
@@ -949,7 +949,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -959,7 +959,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -968,7 +968,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -986,7 +986,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -995,7 +995,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1004,7 +1004,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
@@ -1013,7 +1013,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
@@ -1022,7 +1022,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1031,7 +1031,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1076,7 +1076,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,7 +1085,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
index 52d0d079453a1..d00be9ae74c8b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
index cf6281261b421..77afacbcac10f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
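
The update is mechanical across every unmasked binary test in this diff: the IR intrinsic gains a leading passthru operand, which the unmasked builtins fill with undef so the lowering stays tail agnostic. A minimal sketch of the shape a single vadd test now checks, assuming the non-overloaded vadd_vv_i32m1 spelling these tests exercise (the wrapper name add_i32m1 is illustrative only, not taken from the diff):

// Compile with the V extension enabled, e.g. -march=rv64gcv.
#include <riscv_vector.h>
#include <stddef.h>

// The C-level builtin still takes (op1, op2, vl); only the generated call
// changes, gaining a leading passthru operand:
//   call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(
//       <vscale x 2 x i32> undef, <vscale x 2 x i32> %op1,
//       <vscale x 2 x i32> %op2, i64 %vl)
// An undef passthru keeps the previous tail-agnostic behaviour.
vint32m1_t add_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vadd_vv_i32m1(op1, op2, vl);
}
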
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
index f8070ce5a8312..bcfbcb07de7d5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -43,7 +43,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -52,7 +52,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -61,7 +61,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -70,7 +70,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -79,7 +79,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -88,7 +88,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -97,7 +97,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -106,7 +106,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -115,7 +115,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -124,7 +124,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -133,7 +133,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -142,7 +142,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -160,7 +160,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -169,7 +169,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -178,7 +178,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -187,7 +187,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -196,7 +196,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -205,7 +205,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -214,7 +214,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -223,7 +223,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -232,7 +232,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -241,7 +241,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -250,7 +250,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -259,7 +259,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -268,7 +268,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -277,7 +277,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -286,7 +286,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -295,7 +295,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -304,7 +304,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -313,7 +313,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -322,7 +322,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -331,7 +331,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -340,7 +340,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -349,7 +349,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -358,7 +358,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -367,7 +367,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -376,7 +376,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -385,7 +385,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -403,7 +403,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -412,7 +412,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -421,7 +421,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -430,7 +430,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -439,7 +439,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -457,7 +457,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -466,7 +466,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -475,7 +475,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -484,7 +484,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -493,7 +493,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -502,7 +502,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -511,7 +511,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -520,7 +520,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -529,7 +529,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -538,7 +538,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -556,7 +556,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -574,7 +574,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -583,7 +583,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -592,7 +592,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -601,7 +601,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -610,7 +610,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -619,7 +619,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -628,7 +628,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -637,7 +637,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -646,7 +646,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -655,7 +655,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -664,7 +664,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -673,7 +673,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -682,7 +682,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -691,7 +691,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -700,7 +700,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -709,7 +709,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -718,7 +718,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -727,7 +727,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -736,7 +736,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -745,7 +745,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -754,7 +754,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -763,7 +763,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -772,7 +772,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -781,7 +781,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -790,7 +790,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
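(A minimal sketch, not part of this diff, of what the updated CHECK lines above exercise. The builtin name and header follow the non-overloaded naming these tests appear to use and are assumptions here, not something this patch changes.)

  #include <riscv_vector.h>
  #include <stddef.h>

  vuint32m1_t add_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
    /* The C-level nomask builtin keeps its signature; after this patch the
       generated call to @llvm.riscv.vadd carries a leading passthru operand,
       emitted as undef so the tail-agnostic policy can be selected. */
    return vadd_vv_u32m1(op1, op2, vl);
  }
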
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
index 54f63cd2bf58d..26b8e9a2446d3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vand_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
index 311881eb70192..ac701156a5621 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
index 7d18db0cad686..3451d362e0bb6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
index 18495dc881ca3..94a75de02b53e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfabs_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfabs_v_f16m8 (vfloat16m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
index 4757cd9827aa6..e67009a3fbad9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -89,7 +89,7 @@ vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -98,7 +98,7 @@ vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -107,7 +107,7 @@ vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -116,7 +116,7 @@ vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -125,7 +125,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
@@ -134,7 +134,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -143,7 +143,7 @@ vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
@@ -152,7 +152,7 @@ vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -161,7 +161,7 @@ vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
@@ -170,7 +170,7 @@ vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -233,7 +233,7 @@ vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
@@ -242,7 +242,7 @@ vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -251,7 +251,7 @@ vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -269,7 +269,7 @@ vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
index c2c5b12aeae09..c7b2527657150 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
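The updated CHECK lines in these test files all follow the same pattern: the unmasked builtin now lowers to an intrinsic call whose first operand is an undef passthru vector, leaving the tail state agnostic. A minimal sketch in the style of these tests, assuming a RISC-V toolchain with the V extension and the un-prefixed builtin naming these tests use (the function name fmax_example and the exact IR in the comment are illustrative only, not taken from the patch):

#include <riscv_vector.h>

// Sketch only: an unmasked vv-form builtin call. With this change the
// generated IR for the call below would look roughly like
//   %0 = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(
//            <vscale x 2 x float> undef, <vscale x 2 x float> %op1,
//            <vscale x 2 x float> %op2, i64 %vl)
// where the leading undef passthru operand is the new addition.
vfloat32m1_t fmax_example(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return vfmax_vv_f32m1(op1, op2, vl);
}

The vf (vector-scalar) forms in the diffs below change the same way; only the scalar operand type differs.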
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
index 052c02faa5897..299d4cb009cd9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
index 29c2b4c676021..3ef7f0abe6b63 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
index 0be537645af31..562eb614109f0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
index 50dc403299221..64ed62716e8b7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfneg_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfneg_v_f16m8 (vfloat16m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
index c201f27cc32f3..92c9bce681abc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
index 7428f43db11b7..12974a81c0936 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
index 277b803b9d687..ab485498bd051 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -189,7 +189,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -208,7 +208,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -217,7 +217,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -227,7 +227,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -236,7 +236,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -246,7 +246,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -255,7 +255,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -274,7 +274,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -284,7 +284,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -293,7 +293,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -303,7 +303,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -312,7 +312,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -322,7 +322,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -331,7 +331,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -341,7 +341,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -350,7 +350,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -360,7 +360,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -369,7 +369,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -379,7 +379,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -388,7 +388,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -398,7 +398,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -407,7 +407,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -417,7 +417,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -426,7 +426,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -436,7 +436,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -445,7 +445,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -455,7 +455,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -464,7 +464,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -474,7 +474,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -483,7 +483,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -493,7 +493,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -502,7 +502,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -512,7 +512,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -1090,7 +1090,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -1099,7 +1099,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -1108,7 +1108,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -1117,7 +1117,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -1126,7 +1126,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -1135,7 +1135,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -1144,7 +1144,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -1153,7 +1153,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -1162,7 +1162,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1171,7 +1171,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1180,7 +1180,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1189,7 +1189,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -1198,7 +1198,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -1207,7 +1207,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -1216,7 +1216,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -1225,7 +1225,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -1234,7 +1234,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -1243,7 +1243,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -1252,7 +1252,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -1261,7 +1261,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -1270,7 +1270,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1279,7 +1279,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1288,7 +1288,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1297,7 +1297,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -1306,7 +1306,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -1315,7 +1315,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -1324,7 +1324,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -1333,7 +1333,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -1342,7 +1342,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -1351,7 +1351,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -1360,7 +1360,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -1369,7 +1369,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -1378,7 +1378,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1396,7 +1396,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1405,7 +1405,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
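(For context while reading the CHECK updates above: the C-level builtins keep their existing signatures; only the IR emitted for the nomask forms gains a leading passthru operand, which is undef in these tests. A minimal, illustrative sketch of such a caller follows; it is not part of the patch, and the function name is made up, but the lowering it describes is the same pattern the test files check.)

  #include <stddef.h>
  #include <riscv_vector.h>

  // Nomask vfsgnjn: same source-level call as before this patch. The lowered
  // intrinsic call now looks like
  //   @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, op1, op2, vl)
  // i.e. an undef passthru operand is prepended ahead of op1.
  vfloat32m1_t negate_sign(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
    return vfsgnjn_vv_f32m1(op1, op2, vl);
  }
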
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
index 9c01c32bbf96b..624e55f32a292 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
@@ -28,7 +28,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
@@ -38,7 +38,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
@@ -48,7 +48,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
@@ -58,7 +58,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
@@ -68,7 +68,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
@@ -78,7 +78,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
@@ -88,7 +88,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
@@ -203,7 +203,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16.i64(<vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) {
@@ -212,7 +212,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, si
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16.i64(<vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) {
@@ -221,7 +221,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, si
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16.i64(<vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) {
@@ -230,7 +230,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16.i64(<vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) {
@@ -239,7 +239,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16.i64(<vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) {
@@ -248,7 +248,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16.i64(<vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
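(The vfslide1down/vfslide1up hunks follow the same shape. A hedged sketch of the slide case, using the intrinsic names of this Clang revision and an invented caller name:)

  #include <stddef.h>
  #include <riscv_vector.h>

  // Nomask vfslide1down: the builtin still takes (src, value, vl); Clang now emits
  //   @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> undef, src, value, vl)
  // so the passthru slot is present even when no merge value is supplied.
  vfloat32m1_t shift_in(vfloat32m1_t src, float value, size_t vl) {
    return vfslide1down_vf_f32m1(src, value, vl);
  }
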
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
index 389c762266d88..493473e6bcc56 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
@@ -28,7 +28,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
@@ -38,7 +38,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
@@ -48,7 +48,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
@@ -58,7 +58,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
@@ -68,7 +68,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
@@ -78,7 +78,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
@@ -88,7 +88,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
@@ -198,7 +198,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16.i64(<vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) {
@@ -207,7 +207,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16.i64(<vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) {
@@ -216,7 +216,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16.i64(<vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) {
@@ -225,7 +225,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16.i64(<vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) {
@@ -234,7 +234,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16.i64(<vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) {
@@ -243,7 +243,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16.i64(<vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1up_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
index 88a4c9aadb856..bbb00daab419b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
index 87f59f8483305..818c16262678c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -37,7 +37,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -56,7 +56,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -75,7 +75,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -94,7 +94,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -113,7 +113,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -132,7 +132,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -337,7 +337,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16.i64(<vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) {
@@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -373,7 +373,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -382,7 +382,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -391,7 +391,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16.i64(<vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) {
@@ -400,7 +400,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -409,7 +409,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
@@ -427,7 +427,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16.i64(<vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) {
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -445,7 +445,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16.i64(<vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) {
@@ -472,7 +472,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -490,7 +490,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
@@ -499,7 +499,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16.i64(<vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
index 0254680b38d2e..49ca097e060b3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -168,7 +168,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -186,7 +186,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -204,7 +204,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -222,7 +222,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
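All of the vfwmul updates above follow one mechanical pattern: the nomask builtin now lowers to an intrinsic call whose first operand is an undef passthru of the result type, with op1, op2 and vl shifted one position to the right. A minimal sketch of what one regenerated test case encodes (the function name is illustrative and the non-overloaded vfwmul_vv_f64m1 spelling is assumed from riscv_vector.h, not taken from this diff):

#include <riscv_vector.h>

// Per the CHECK-RV64 lines above: the call now carries a leading
// <vscale x 1 x double> undef passthru of the result type ahead of op1/op2/vl.
//
// CHECK-RV64: call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
vfloat64m1_t example_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfwmul_vv_f64m1(op1, op2, vl);
}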
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
index 410b3c89738b1..e295290fe1f5c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -37,7 +37,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -56,7 +56,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -75,7 +75,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -94,7 +94,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -113,7 +113,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -132,7 +132,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -337,7 +337,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16.i64(<vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) {
@@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -373,7 +373,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -382,7 +382,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -391,7 +391,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t v
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16.i64(<vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) {
@@ -400,7 +400,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -409,7 +409,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
@@ -427,7 +427,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16.i64(<vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) {
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -445,7 +445,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16.i64(<vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) {
@@ -472,7 +472,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -490,7 +490,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
@@ -499,7 +499,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16.i64(<vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) {
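The vfwsub.c hunks also cover the vfwsub.w (wide first operand) forms, where op1 already has the widened element type; the added passthru shares that wide type, so the rewritten call begins with two wide vector operands, the undef passthru followed by op1. A hedged sketch, again with an illustrative function name and assuming the non-overloaded vfwsub_wv_f64m1 builtin spelling used by these tests:

#include <riscv_vector.h>

// Per the CHECK-RV64 lines above for the .w form: the first operand is an
// undef passthru of the wide result type, then the already-wide op1, the
// narrow op2, and vl.
//
// CHECK-RV64: call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
vfloat64m1_t example_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfwsub_wv_f64m1(op1, op2, vl);
}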
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
index 17a4c55b87bab..6c24e04056c80 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
index bb31db3465e65..db1a83f366185 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c
index 97686762db666..48f138669bfd8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c
@@ -9,7 +9,7 @@
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -18,7 +18,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -27,7 +27,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -36,7 +36,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -45,7 +45,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -54,7 +54,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -63,7 +63,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -72,7 +72,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -81,7 +81,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -90,7 +90,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -99,7 +99,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -108,7 +108,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -117,7 +117,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -126,7 +126,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -135,7 +135,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -144,7 +144,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -153,7 +153,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -162,7 +162,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
@@ -171,7 +171,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -180,7 +180,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
@@ -207,7 +207,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -216,7 +216,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
index c2c4522ca174b..2e0c093f81d4c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1590,7 +1590,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1599,7 +1599,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1608,7 +1608,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1662,7 +1662,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1671,7 +1671,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1680,7 +1680,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1689,7 +1689,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1698,7 +1698,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1716,7 +1716,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1725,7 +1725,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
index 81dc0a7ee0d8f..582b4f3d2680b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
@@ -124,7 +124,7 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -133,7 +133,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
@@ -143,7 +143,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
@@ -152,7 +152,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -161,7 +161,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
@@ -216,7 +216,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
@@ -225,7 +225,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -234,7 +234,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
@@ -243,7 +243,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -252,7 +252,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
@@ -289,7 +289,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -298,7 +298,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
@@ -308,7 +308,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -317,7 +317,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
@@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
@@ -400,7 +400,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -409,7 +409,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
@@ -419,7 +419,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -428,7 +428,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
@@ -438,7 +438,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
@@ -457,7 +457,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -466,7 +466,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
@@ -476,7 +476,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -485,7 +485,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
@@ -495,7 +495,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -504,7 +504,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
@@ -514,7 +514,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -523,7 +523,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
@@ -533,7 +533,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -542,7 +542,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
@@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
index 6bdb8baec8ab8..3b5dd626c29d5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
@@ -69,7 +69,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
@@ -78,7 +78,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
@@ -87,7 +87,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
@@ -123,7 +123,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
@@ -132,7 +132,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
@@ -141,7 +141,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
@@ -168,7 +168,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
@@ -177,7 +177,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
@@ -213,7 +213,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
@@ -222,7 +222,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
@@ -231,7 +231,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
@@ -240,7 +240,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
@@ -249,7 +249,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
@@ -258,7 +258,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
index e83e29b7244c5..8501b4c2e1f06 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
@@ -88,7 +88,7 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
@@ -97,7 +97,7 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
@@ -106,7 +106,7 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
@@ -115,7 +115,7 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
@@ -124,7 +124,7 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@@ -133,7 +133,7 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
@@ -142,7 +142,7 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
@@ -151,7 +151,7 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
@@ -169,7 +169,7 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
@@ -178,7 +178,7 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
@@ -196,7 +196,7 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
index 0736b3c35f461..e8a975946a394 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
@@ -88,7 +88,7 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
@@ -97,7 +97,7 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
@@ -106,7 +106,7 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
@@ -115,7 +115,7 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
@@ -124,7 +124,7 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@@ -133,7 +133,7 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
@@ -142,7 +142,7 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
@@ -151,7 +151,7 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
@@ -169,7 +169,7 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
@@ -178,7 +178,7 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
@@ -196,7 +196,7 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
@@ -205,7 +205,7 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
@@ -214,7 +214,7 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
@@ -223,7 +223,7 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
@@ -232,7 +232,7 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
@@ -241,7 +241,7 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
@@ -259,7 +259,7 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
@@ -268,7 +268,7 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
@@ -277,7 +277,7 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
@@ -286,7 +286,7 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
@@ -295,7 +295,7 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
@@ -304,7 +304,7 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
@@ -313,7 +313,7 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
@@ -322,7 +322,7 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
@@ -331,7 +331,7 @@ vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
@@ -340,7 +340,7 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
@@ -349,7 +349,7 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
@@ -358,7 +358,7 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
@@ -367,7 +367,7 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {
@@ -376,7 +376,7 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {
@@ -385,7 +385,7 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {
@@ -394,7 +394,7 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vnot_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
index f86d5353acd91..c3cfb74a52998 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
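For context, here is a minimal C-level sketch (illustrative only, not part of the diff) of the kind of call these vnsra checks cover, assuming the non-overloaded vnsra_wx_* spelling used by this test file. With no destination supplied, the builtin now lowers to the intrinsic with an undef passthru operand, i.e. the tail-agnostic form checked above.

#include <riscv_vector.h>

// Narrowing arithmetic right shift by a scalar amount; lowers to
// @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64 with an undef first
// (passthru) operand, matching the updated CHECK lines (hypothetical
// caller, not taken from the patch).
vint32m1_t narrow_ashift(vint64m2_t wide, size_t shamt, size_t vl) {
  return vnsra_wx_i32m1(wide, shamt, vl);
}
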
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
index 33d3479ec7abe..92743610fd1b6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
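Likewise for the unsigned narrowing logical shift, a brief illustrative sketch (not from the patch), assuming the non-overloaded vnsrl_wx_* spelling exercised here; the undef passthru in the updated checks again corresponds to callers that provide no destination vector.

#include <riscv_vector.h>

// Lowers to @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64 with an undef
// first (passthru) operand, as in the CHECK lines above (hypothetical
// caller, not taken from the patch).
vuint32m1_t narrow_lshift(vuint64m2_t wide, size_t shamt, size_t vl) {
  return vnsrl_wx_u32m1(wide, shamt, vl);
}
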
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
index 95630044f5e3b..ee25469f6ffca 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vor_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
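(Editorial note, not part of the patch: every updated CHECK-RV64 line in vor.c above follows the same pattern — the nomask builtin now lowers to an IR call whose first operand is an undef passthru vector, with the original operands shifted one position to the right. A minimal sketch of the kind of source these tests exercise, assuming the v0.10-era non-overloaded intrinsic name vor_vx_u32m1 from <riscv_vector.h>; the function name and parameters here are illustrative only.)

#include <riscv_vector.h>

// Hypothetical example, not taken from the test file itself.
vuint32m1_t or_with_constant(vuint32m1_t v, uint32_t bits, size_t vl) {
  // Per the updated CHECK-RV64 lines, this is expected to lower to a call to
  // @llvm.riscv.vor.nxv2i32.i32.i64 whose first operand is an undef passthru
  // vector, i.e. the tail-agnostic form of the nomask intrinsic.
  return vor_vx_u32m1(v, bits, vl);
}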
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
index 01024f14a0407..3164f1d57923b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
index afc7638555f82..6e787cd485c73 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
@@ -18,7 +18,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
@@ -27,7 +27,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
@@ -37,7 +37,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
@@ -46,7 +46,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
@@ -56,7 +56,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
@@ -65,7 +65,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -74,7 +74,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
@@ -83,7 +83,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -92,7 +92,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
@@ -101,7 +101,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -110,7 +110,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
@@ -119,7 +119,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -128,7 +128,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
@@ -137,7 +137,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
@@ -147,7 +147,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
@@ -156,7 +156,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
@@ -166,7 +166,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
@@ -175,7 +175,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
@@ -185,7 +185,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
@@ -194,7 +194,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
@@ -204,7 +204,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
@@ -213,7 +213,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
@@ -223,7 +223,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
@@ -232,7 +232,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
@@ -242,7 +242,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
@@ -251,7 +251,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
@@ -261,7 +261,7 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
@@ -270,7 +270,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
@@ -280,7 +280,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
@@ -289,7 +289,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
@@ -299,7 +299,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
@@ -308,7 +308,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
@@ -318,7 +318,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
@@ -327,7 +327,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
@@ -337,7 +337,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
@@ -346,7 +346,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
@@ -356,7 +356,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
@@ -365,7 +365,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
@@ -375,7 +375,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
@@ -384,7 +384,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
@@ -394,7 +394,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
@@ -403,7 +403,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
@@ -413,7 +413,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
@@ -422,7 +422,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
@@ -432,7 +432,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
@@ -441,7 +441,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
@@ -451,7 +451,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
@@ -460,7 +460,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
@@ -470,7 +470,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
@@ -479,7 +479,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -488,7 +488,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
@@ -497,7 +497,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -506,7 +506,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
@@ -515,7 +515,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -524,7 +524,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
@@ -533,7 +533,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -542,7 +542,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
@@ -551,7 +551,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
@@ -561,7 +561,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
@@ -571,7 +571,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
@@ -581,7 +581,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
@@ -591,7 +591,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
@@ -601,7 +601,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
@@ -610,7 +610,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
@@ -620,7 +620,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
@@ -639,7 +639,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
@@ -648,7 +648,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
@@ -658,7 +658,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
@@ -677,7 +677,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
@@ -687,7 +687,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
@@ -697,7 +697,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
@@ -706,7 +706,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
@@ -716,7 +716,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
@@ -725,7 +725,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
@@ -735,7 +735,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
@@ -744,7 +744,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
@@ -754,7 +754,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
@@ -763,7 +763,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
@@ -773,7 +773,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
@@ -782,7 +782,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
@@ -792,7 +792,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
@@ -801,7 +801,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
@@ -811,7 +811,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
@@ -820,7 +820,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
@@ -830,7 +830,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
@@ -839,7 +839,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
@@ -849,7 +849,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
@@ -859,7 +859,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
@@ -869,7 +869,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
@@ -878,7 +878,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
@@ -888,7 +888,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
@@ -897,7 +897,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
@@ -907,7 +907,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
@@ -916,7 +916,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
@@ -926,7 +926,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
@@ -935,7 +935,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
@@ -945,7 +945,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
@@ -954,7 +954,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
@@ -964,7 +964,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
@@ -973,7 +973,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
@@ -983,7 +983,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
@@ -992,7 +992,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
@@ -1002,7 +1002,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
@@ -1011,7 +1011,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
@@ -1021,7 +1021,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
@@ -1031,7 +1031,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
@@ -1041,7 +1041,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
@@ -1078,7 +1078,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
@@ -1088,7 +1088,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
@@ -1098,7 +1098,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
@@ -1108,7 +1108,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
@@ -1118,7 +1118,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
@@ -1128,7 +1128,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
@@ -1138,7 +1138,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
@@ -1148,7 +1148,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
@@ -1158,7 +1158,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
@@ -1168,7 +1168,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
@@ -1178,7 +1178,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
@@ -1188,7 +1188,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
@@ -1198,7 +1198,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
@@ -1208,7 +1208,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
@@ -1218,7 +1218,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
@@ -1228,7 +1228,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
@@ -1238,7 +1238,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
@@ -1258,7 +1258,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
@@ -1268,7 +1268,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
@@ -1278,7 +1278,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -1288,7 +1288,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -1298,7 +1298,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -1308,7 +1308,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -1318,7 +1318,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -1328,7 +1328,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -1338,7 +1338,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -1348,7 +1348,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
@@ -1358,7 +1358,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
@@ -1368,7 +1368,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
@@ -1378,7 +1378,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
@@ -1388,7 +1388,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
@@ -1398,7 +1398,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
@@ -1408,7 +1408,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
@@ -1418,7 +1418,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
@@ -1428,7 +1428,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
@@ -1438,7 +1438,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
@@ -1448,7 +1448,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
@@ -1458,7 +1458,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
@@ -1468,7 +1468,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
@@ -1478,7 +1478,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
@@ -1488,7 +1488,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
@@ -1498,7 +1498,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
@@ -1508,7 +1508,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2,
@@ -3212,7 +3212,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
@@ -3221,7 +3221,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, si
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t vl) {
@@ -3230,7 +3230,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t v
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
@@ -3239,7 +3239,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, si
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t vl) {
@@ -3248,7 +3248,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t v
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
@@ -3257,7 +3257,7 @@ vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl) {
@@ -3266,7 +3266,7 @@ vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl)
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
@@ -3275,7 +3275,7 @@ vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl) {
@@ -3284,7 +3284,7 @@ vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl)
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
@@ -3293,7 +3293,7 @@ vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl) {
@@ -3302,7 +3302,7 @@ vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl)
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
@@ -3311,7 +3311,7 @@ vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl) {
@@ -3320,7 +3320,7 @@ vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl)
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3329,7 +3329,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -3338,7 +3338,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -3347,7 +3347,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -3356,7 +3356,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -3365,7 +3365,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgatherei16_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
index d7688d2642c4d..b5f951ff8c828 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
index abfe106708dcd..166bc19fca8d8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
index 867fe098640d7..26b72ea52e98e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
@@ -135,7 +135,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
@@ -144,7 +144,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
@@ -153,7 +153,7 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
@@ -162,7 +162,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
@@ -171,7 +171,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
@@ -180,7 +180,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
@@ -207,7 +207,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
@@ -217,7 +217,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
@@ -227,7 +227,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
@@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
@@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
@@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
@@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
@@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
@@ -283,7 +283,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
@@ -293,7 +293,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
@@ -303,7 +303,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
@@ -313,7 +313,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
@@ -323,7 +323,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
@@ -333,7 +333,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
@@ -343,7 +343,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
@@ -353,7 +353,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
@@ -363,7 +363,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -373,7 +373,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -383,7 +383,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -393,7 +393,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -403,7 +403,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -413,7 +413,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
index 61d7023eb431c..b903528b48e83 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
@@ -135,7 +135,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
@@ -144,7 +144,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
@@ -153,7 +153,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
@@ -162,7 +162,7 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
@@ -171,7 +171,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
@@ -180,7 +180,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
@@ -207,7 +207,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
@@ -216,7 +216,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
@@ -225,7 +225,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
@@ -234,7 +234,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
@@ -243,7 +243,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
@@ -252,7 +252,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
@@ -261,7 +261,7 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
@@ -270,7 +270,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
@@ -280,7 +280,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
@@ -290,7 +290,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
@@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
@@ -310,7 +310,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
@@ -320,7 +320,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
@@ -330,7 +330,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
@@ -340,7 +340,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
@@ -350,7 +350,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
@@ -360,7 +360,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -370,7 +370,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -380,7 +380,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -390,7 +390,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -400,7 +400,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -410,7 +410,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
index a7f2dec736893..67e7033896bb0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
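The updated CHECK lines above all follow one pattern: the unmasked intrinsic gains a leading vector operand, which these tests pass as undef. As a minimal sketch of how one of the tested builtins is driven from C (assuming riscv_vector.h and an rv64 target with the V extension; the wrapper name below is illustrative only and not part of this patch):

#include <riscv_vector.h>

// Illustrative caller only: the unmasked vsll_vx builtin lowers to
// llvm.riscv.vsll.<types> whose first operand is the passthru vector,
// emitted as undef in the IR checked above.
vuint32m1_t shift_left_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
  return vsll_vx_u32m1(op1, shift, vl);
}
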
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
index 43983b15f18d5..c92c792049755 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -26,7 +26,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -44,7 +44,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -62,7 +62,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
index e1db9d589ec65..fd821e4cbd79c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
index ddf1f8bce3c97..2b3352e3fc188 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
index 3150746b841dc..c7d59a775a067 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
index 95e9cf843fc66..b14e0b2105738 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
index fccf57aecfb1d..464d78ce9d569 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
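
For reference, one of the nomask wrappers exercised above can be reproduced standalone. This is a minimal sketch, assuming the <riscv_vector.h> intrinsic header and the same RV64 vector target these tests are built for; the sketch_vssrl_vv_u8m1 wrapper name and the vssrl_vv_u8m1 builtin call are assumptions that simply mirror the CHECK labels and function signatures in the hunks above, not part of the patch itself:

#include <stddef.h>
#include <riscv_vector.h>

// Assumed wrapper mirroring test_vssrl_vv_u8m1 above; the non-overloaded
// builtin name is an assumption based on the intrinsic naming used in this
// test file.
vuint8m1_t sketch_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
  // Per the updated CHECK-RV64 lines, this call is expected to lower to
  // @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64 with a leading <vscale x 8 x i8> undef
  // operand ahead of op1, shift, and vl.
  return vssrl_vv_u8m1(op1, shift, vl);
}
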
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
index dc16819e04db3..1ed8555c70cf9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
index 4275d13f9d714..30929ff6a773c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
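The updated CHECK lines above all follow the same shape: the nomask vsub intrinsics gain a leading passthru operand of the result vector type, and the plain builtins pass undef for it. A minimal hand-written IR sketch of the new form, using the nxv1i64 vector-vector variant that appears in these tests (the function name @vsub_example is illustrative only):

declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(
  <vscale x 1 x i64>,   ; passthru (these nomask tests pass undef here)
  <vscale x 1 x i64>,   ; op1
  <vscale x 1 x i64>,   ; op2
  i64)                  ; vl

define <vscale x 1 x i64> @vsub_example(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %op2, i64 %vl) {
  ; same operand order as the updated CHECK lines: passthru, op1, op2, vl
  %r = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(
           <vscale x 1 x i64> undef, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %op2, i64 %vl)
  ret <vscale x 1 x i64> %r
}

The vwadd diff below follows the same pattern, except that for the widening forms the passthru has the widened result type (e.g. <vscale x 1 x i16> for the nxv1i16 variants) rather than the source element type.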
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
index 77036c4d6e752..93f3581d87207 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -556,7 +556,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
@@ -575,7 +575,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -594,7 +594,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -603,7 +603,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
@@ -613,7 +613,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
@@ -622,7 +622,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -631,7 +631,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
@@ -649,7 +649,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
@@ -658,7 +658,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -676,7 +676,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
@@ -685,7 +685,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
@@ -694,7 +694,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -703,7 +703,7 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -712,7 +712,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
@@ -721,7 +721,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
@@ -730,7 +730,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -739,7 +739,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -748,7 +748,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
@@ -757,7 +757,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
@@ -766,7 +766,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -776,7 +776,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -785,7 +785,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -814,7 +814,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -823,7 +823,7 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
@@ -832,7 +832,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -850,7 +850,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
@@ -868,7 +868,7 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
@@ -877,7 +877,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -886,7 +886,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -895,7 +895,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
@@ -904,7 +904,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
@@ -913,7 +913,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -922,7 +922,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -931,7 +931,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
@@ -949,7 +949,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -959,7 +959,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -968,7 +968,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -986,7 +986,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -995,7 +995,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1004,7 +1004,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
@@ -1013,7 +1013,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
@@ -1022,7 +1022,7 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1031,7 +1031,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1076,7 +1076,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,7 +1085,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
index 4c4c6a26861b3..03d06da6b0b9b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
@@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
@@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
@@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
@@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
@@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
@@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
@@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
@@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
@@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
@@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
@@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
@@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
@@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
@@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
@@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
@@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
@@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
@@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
@@ -195,7 +195,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
@@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
@@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
@@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
@@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
@@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
@@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
@@ -258,7 +258,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
@@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
index 023133b8f331d..3a86c65edb6de 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
index 9422cd4d1b691..f2b4098676383 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -556,7 +556,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
@@ -575,7 +575,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -594,7 +594,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -603,7 +603,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
@@ -613,7 +613,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
@@ -622,7 +622,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -631,7 +631,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
@@ -649,7 +649,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
@@ -658,7 +658,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -676,7 +676,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
@@ -685,7 +685,7 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
@@ -694,7 +694,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -703,7 +703,7 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -712,7 +712,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
@@ -721,7 +721,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
@@ -730,7 +730,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -739,7 +739,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -748,7 +748,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
@@ -757,7 +757,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
@@ -766,7 +766,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -776,7 +776,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -785,7 +785,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -814,7 +814,7 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -823,7 +823,7 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
@@ -832,7 +832,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -850,7 +850,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
@@ -868,7 +868,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
@@ -877,7 +877,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -886,7 +886,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -895,7 +895,7 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
@@ -904,7 +904,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
@@ -913,7 +913,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -922,7 +922,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -931,7 +931,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
@@ -949,7 +949,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -959,7 +959,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -968,7 +968,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -986,7 +986,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -995,7 +995,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1004,7 +1004,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
@@ -1013,7 +1013,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
@@ -1022,7 +1022,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1031,7 +1031,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1076,7 +1076,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,7 +1085,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
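The vxor.c hunks that follow show the same mechanical change as the vwsubu hunks above: each nomask intrinsic call gains a leading passthru operand of the result vector type, passed as undef. As a minimal sketch only (not part of this diff; it assumes the non-overloaded builtin name vxor_vv_i8m1 and the riscv_vector.h header used by these tests), a source-level call of the shape below would be expected to lower to the updated IR form seen in the CHECK-RV64 lines:

#include <stddef.h>
#include <riscv_vector.h>

// Sketch: a plain (unmasked, no-passthru) vxor builtin call. After this
// change, the emitted intrinsic carries an extra leading operand:
//   %0 = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(
//            <vscale x 8 x i8> undef, <vscale x 8 x i8> %op1,
//            <vscale x 8 x i8> %op2, i64 %vl)
vint8m1_t xor_example(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vxor_vv_i8m1(op1, op2, vl);
}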
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
index be076ea0b5039..0e775104dabd9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index eb718ff8a49ea..9888e6c6862cf 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -162,6 +162,7 @@ class RVVIntrinsic {
bool IsMask;
bool HasVL;
bool HasPolicy;
+ bool HasNoMaskPassThru;
bool HasNoMaskedOverloaded;
bool HasAutoDef; // There is automatic definition in header
std::string ManualCodegen;
@@ -177,8 +178,8 @@ class RVVIntrinsic {
RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
StringRef MangledSuffix, StringRef IRName, bool IsMask,
bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
- bool HasNoMaskedOverloaded, bool HasAutoDef,
- StringRef ManualCodegen, const RVVTypes &Types,
+ bool HasNoMaskPassThru, bool HasNoMaskedOverloaded,
+ bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
const std::vector<int64_t> &IntrinsicTypes,
const std::vector<StringRef> &RequiredFeatures, unsigned NF);
~RVVIntrinsic() = default;
@@ -188,6 +189,7 @@ class RVVIntrinsic {
StringRef getMangledName() const { return MangledName; }
bool hasVL() const { return HasVL; }
bool hasPolicy() const { return HasPolicy; }
+ bool hasNoMaskPassThru() const { return HasNoMaskPassThru; }
bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
bool hasManualCodegen() const { return !ManualCodegen.empty(); }
bool hasAutoDef() const { return HasAutoDef; }
@@ -770,12 +772,14 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
StringRef NewMangledName, StringRef MangledSuffix,
StringRef IRName, bool IsMask,
bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
- bool HasNoMaskedOverloaded, bool HasAutoDef,
- StringRef ManualCodegen, const RVVTypes &OutInTypes,
+ bool HasNoMaskPassThru, bool HasNoMaskedOverloaded,
+ bool HasAutoDef, StringRef ManualCodegen,
+ const RVVTypes &OutInTypes,
const std::vector<int64_t> &NewIntrinsicTypes,
const std::vector<StringRef> &RequiredFeatures,
unsigned NF)
: IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy),
+ HasNoMaskPassThru(HasNoMaskPassThru),
HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
ManualCodegen(ManualCodegen.str()), NF(NF) {
@@ -823,7 +827,7 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
// IntrinsicTypes is nonmasked version index. Need to update it
// if there is maskedoff operand (It is always in first operand).
IntrinsicTypes = NewIntrinsicTypes;
- if (IsMask && HasMaskedOffOperand) {
+ if ((IsMask && HasMaskedOffOperand) || (!IsMask && HasNoMaskPassThru)) {
for (auto &I : IntrinsicTypes) {
if (I >= 0)
I += NF;
@@ -860,6 +864,9 @@ void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
} else {
OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
}
+ } else if (hasNoMaskPassThru()) {
+ OS << " Ops.push_back(llvm::UndefValue::get(ResultType));\n";
+ OS << " std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());\n";
}
OS << " IntrinsicTypes = {";
@@ -1107,6 +1114,8 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) {
PrintFatalError("Builtin with same name has
diff erent HasPolicy");
else if (P.first->second->hasPolicy() != Def->hasPolicy())
PrintFatalError("Builtin with same name has
diff erent HasPolicy");
+ else if (P.first->second->hasNoMaskPassThru() != Def->hasNoMaskPassThru())
+ PrintFatalError("Builtin with same name has
diff erent HasNoMaskPassThru");
else if (P.first->second->getIntrinsicTypes() != Def->getIntrinsicTypes())
PrintFatalError("Builtin with same name has
diff erent IntrinsicTypes");
}
@@ -1154,6 +1163,7 @@ void RVVEmitter::createRVVIntrinsics(
bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
bool HasVL = R->getValueAsBit("HasVL");
bool HasPolicy = R->getValueAsBit("HasPolicy");
+ bool HasNoMaskPassThru = R->getValueAsBit("HasNoMaskPassThru");
bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
StringRef ManualCodegen = R->getValueAsString("ManualCodegen");
@@ -1228,8 +1238,8 @@ void RVVEmitter::createRVVIntrinsics(
Out.push_back(std::make_unique<RVVIntrinsic>(
Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
/*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy,
- HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
- IntrinsicTypes, RequiredFeatures, NF));
+ HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
+ Types.getValue(), IntrinsicTypes, RequiredFeatures, NF));
if (HasMask) {
// Create a mask intrinsic
Optional<RVVTypes> MaskTypes =
@@ -1237,8 +1247,9 @@ void RVVEmitter::createRVVIntrinsics(
Out.push_back(std::make_unique<RVVIntrinsic>(
Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
/*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy,
- HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
- MaskTypes.getValue(), IntrinsicTypes, RequiredFeatures, NF));
+ HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef,
+ ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes,
+ RequiredFeatures, NF));
}
} // end for Log2LMULList
} // end for TypeRange
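
For illustration only (not part of the patch; the function name and types below are arbitrary): with the emitter change above, codegen for an unmasked builtin pushes an UndefValue of the result type and rotates it into the first operand slot, so the emitted call carries an undef passthru, matching the updated vxor checks earlier in this diff.

; Sketch of the IR now produced for an unmasked builtin such as vxor_vv_u32m1:
declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(
  <vscale x 2 x i32>,   ; passthru (undef => tail agnostic)
  <vscale x 2 x i32>,   ; op1
  <vscale x 2 x i32>,   ; op2
  i64)                  ; vl

define <vscale x 2 x i32> @sketch_vxor(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %op2, i64 %vl) {
  %r = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(
      <vscale x 2 x i32> undef, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %op2, i64 %vl)
  ret <vscale x 2 x i32> %r
}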
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 57b0f792ea564..9ec8a02f31847 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -383,12 +383,13 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For destination vector type is the same as first and second source vector.
- // Input: (vector_in, int_vector_in, vl)
+ // Input: (passthru, vector_in, int_vector_in, vl)
class RISCVRGatherVVNoMask
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
+ let VLOperand = 3;
}
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, int_vector_in, vl, ta)
@@ -400,13 +401,14 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 4;
}
- // Input: (vector_in, int16_vector_in, vl)
+ // Input: (passthru, vector_in, int16_vector_in, vl)
class RISCVRGatherEI16VVNoMask
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
+ let VLOperand = 3;
}
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, int16_vector_in, vl, ta)
@@ -421,12 +423,13 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is the same as first source vector, and the
// second operand is XLen.
- // Input: (vector_in, xlen_in, vl)
+ // Input: (passthru, vector_in, xlen_in, vl)
class RISCVGatherVXNoMask
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
+ LLVMMatchType<1>],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
+ let VLOperand = 3;
}
// For destination vector type is the same as first source vector (with mask).
// Second operand is XLen.
@@ -440,13 +443,14 @@ let TargetPrefix = "riscv" in {
let VLOperand = 4;
}
// For destination vector type is the same as first source vector.
- // Input: (vector_in, vector_in/scalar_in, vl)
+ // Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVBinaryAAXNoMask
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+ llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 2;
+ let SplatOperand = 2;
+ let VLOperand = 3;
}
// For destination vector type is the same as first source vector (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
@@ -461,12 +465,13 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is the same as first source vector. The
// second source operand must match the destination type or be an XLen scalar.
- // Input: (vector_in, vector_in/scalar_in, vl)
+ // Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVBinaryAAShiftNoMask
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+ llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
+ let VLOperand = 3;
}
// For destination vector type is the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
@@ -480,13 +485,14 @@ let TargetPrefix = "riscv" in {
let VLOperand = 4;
}
// For destination vector type is NOT the same as first source vector.
- // Input: (vector_in, vector_in/scalar_in, vl)
+ // Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVBinaryABXNoMask
: Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
+ llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 2;
+ let SplatOperand = 2;
+ let VLOperand = 3;
}
// For destination vector type is NOT the same as first source vector (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
@@ -501,12 +507,13 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is NOT the same as first source vector. The
// second source operand must match the destination type or be an XLen scalar.
- // Input: (vector_in, vector_in/scalar_in, vl)
+ // Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVBinaryABShiftNoMask
: Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
+ llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
+ let VLOperand = 3;
}
// For destination vector type is NOT the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
@@ -595,13 +602,14 @@ let TargetPrefix = "riscv" in {
}
// For Saturating binary operations.
// The destination vector type is the same as first source vector.
- // Input: (vector_in, vector_in/scalar_in, vl)
+ // Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVSaturatingBinaryAAXNoMask
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+ llvm_anyint_ty],
[IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 2;
+ let SplatOperand = 2;
+ let VLOperand = 3;
}
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
@@ -618,12 +626,13 @@ let TargetPrefix = "riscv" in {
// For Saturating binary operations.
// The destination vector type is the same as first source vector.
// The second source operand matches the destination type or is an XLen scalar.
- // Input: (vector_in, vector_in/scalar_in, vl)
+ // Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVSaturatingBinaryAAShiftNoMask
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+ llvm_anyint_ty],
[IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let VLOperand = 2;
+ let VLOperand = 3;
}
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
@@ -640,12 +649,13 @@ let TargetPrefix = "riscv" in {
// For Saturating binary operations.
// The destination vector type is NOT the same as first source vector.
// The second source operand matches the destination type or is an XLen scalar.
- // Input: (vector_in, vector_in/scalar_in, vl)
+ // Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVSaturatingBinaryABShiftNoMask
: Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
+ llvm_anyint_ty],
[IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let VLOperand = 2;
+ let VLOperand = 3;
}
// For Saturating binary operations with mask.
// The destination vector type is NOT the same as first source vector (with mask).
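
As a sketch of the signature change above (the declaration mirrors the test updates later in this patch): each unmasked binary intrinsic gains a leading passthru operand of the result type, which is why SplatOperand/VLOperand shift from 1/2 to 2/3.

; Old shape: (vector_in, vector_in/scalar_in, vl)           -- VLOperand was index 2
; New shape: (passthru, vector_in, vector_in/scalar_in, vl) -- VLOperand is index 3
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,   ; passthru
  <vscale x 1 x i64>,   ; vector_in
  <vscale x 1 x i64>,   ; vector_in/scalar_in
  i64)                  ; vl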
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8ddbb072656ad..53bdd4b64904c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4475,10 +4475,12 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
InsertI64VL);
// First slide in the hi value, then the lo in underneath it.
- ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
- ValHi, I32Mask, InsertI64VL);
- ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
- ValLo, I32Mask, InsertI64VL);
+ ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
+ DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
+ I32Mask, InsertI64VL);
+ ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
+ DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
+ I32Mask, InsertI64VL);
// Bitcast back to the right container type.
ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
}
@@ -4774,8 +4776,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
// We need to special case these when the scalar is larger than XLen.
unsigned NumOps = Op.getNumOperands();
bool IsMasked = NumOps == 7;
- unsigned OpOffset = IsMasked ? 1 : 0;
- SDValue Scalar = Op.getOperand(2 + OpOffset);
+ SDValue Scalar = Op.getOperand(3);
if (Scalar.getValueType().bitsLE(XLenVT))
break;
@@ -4790,7 +4791,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
// Convert the vector source to the equivalent nxvXi32 vector.
MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
- SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
+ SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(2));
SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
DAG.getConstant(0, DL, XLenVT));
@@ -4807,17 +4808,34 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
// Shift the two scalar parts in using SEW=32 slide1up/slide1down
// instructions.
- if (IntNo == Intrinsic::riscv_vslide1up ||
- IntNo == Intrinsic::riscv_vslide1up_mask) {
- Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
- I32Mask, I32VL);
- Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
- I32Mask, I32VL);
+ SDValue Passthru = DAG.getBitcast(I32VT, Op.getOperand(1));
+ if (!IsMasked) {
+ if (IntNo == Intrinsic::riscv_vslide1up) {
+ Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
+ ScalarHi, I32Mask, I32VL);
+ Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
+ ScalarLo, I32Mask, I32VL);
+ } else {
+ Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
+ ScalarLo, I32Mask, I32VL);
+ Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
+ ScalarHi, I32Mask, I32VL);
+ }
} else {
- Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
- I32Mask, I32VL);
- Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
- I32Mask, I32VL);
+ // TODO: These VSLIDE1 nodes could be TAMA because we use vmerge to select
+ // the maskedoff value.
+ SDValue Undef = DAG.getUNDEF(I32VT);
+ if (IntNo == Intrinsic::riscv_vslide1up_mask) {
+ Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec,
+ ScalarHi, I32Mask, I32VL);
+ Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec,
+ ScalarLo, I32Mask, I32VL);
+ } else {
+ Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec,
+ ScalarLo, I32Mask, I32VL);
+ Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec,
+ ScalarHi, I32Mask, I32VL);
+ }
}
// Convert back to nxvXi64.
@@ -4825,11 +4843,21 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
if (!IsMasked)
return Vec;
-
// Apply mask after the operation.
SDValue Mask = Op.getOperand(NumOps - 3);
SDValue MaskedOff = Op.getOperand(1);
- return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
+ // Assume Policy operand is the last operand.
+ uint64_t Policy = Op.getConstantOperandVal(NumOps - 1);
+ // We don't need to select maskedoff if it's undef.
+ if (MaskedOff.isUndef())
+ return Vec;
+ // TAMU
+ if (Policy == RISCVII::TAIL_AGNOSTIC)
+ return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
+ VL);
+ // TUMA or TUMU: Currently we always emit the tumu policy regardless of tuma.
+ // That is fine because vmerge does not care about the mask policy.
+ return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff, VL);
}
}
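
A minimal sketch of the new masked lowering (the @sketch_tamu name below is illustrative; the policy encoding is assumed from RISCVII, where TAIL_AGNOSTIC == 1). The masked-vslide1down-rv32.ll test added below exercises each of these cases.

declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
  <vscale x 1 x i64>, <vscale x 1 x i64>, i64, <vscale x 1 x i1>, i32, i32)

define <vscale x 1 x i64> @sketch_tamu(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> %src,
                                       i64 %scalar, <vscale x 1 x i1> %mask, i32 %vl) {
  ; maskedoff == undef  -> the slide result is returned directly, no merge at all
  ; policy == 1 (TAMU)  -> RISCVISD::VSELECT_VL, i.e. vmerge under a "ta" vsetvli
  ; other policies      -> RISCVISD::VP_MERGE_VL, i.e. vmerge under a "tu" vsetvli
  %r = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
      <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> %src, i64 %scalar,
      <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  ret <vscale x 1 x i64> %r
}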
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 2d1da98588d52..134d5fa89739f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -155,9 +155,9 @@ enum NodeType : unsigned {
// and the fifth the VL.
VSLIDEUP_VL,
VSLIDEDOWN_VL,
- // Matches the semantics of vslide1up/slide1down. The first operand is the
- // source vector, the second is the XLenVT scalar value. The third and fourth
- // operands are the mask and VL operands.
+ // Matches the semantics of vslide1up/slide1down. The first operand is the
+ // passthru operand, the second is the source vector, and the third is the
+ // XLenVT scalar value. The fourth and fifth operands are the mask and VL operands.
VSLIDE1UP_VL,
VSLIDE1DOWN_VL,
// Matches the semantics of the vid.v instruction, with a mask and VL
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 25b02824c1450..72543485dc8d9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -996,6 +996,24 @@ class VPseudoBinaryNoMask<VReg RetClass,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+class VPseudoBinaryNoMaskTU<VReg RetClass,
+ VReg Op1Class,
+ DAGOperand Op2Class,
+ string Constraint> :
+ Pseudo<(outs RetClass:$rd),
+ (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasDummyMask = 1;
+ let HasMergeOp = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
// Special version of VPseudoBinaryNoMask where we pretend the first source is
// tied to the destination.
// This allows maskedoff and rs2 to be the same register.
@@ -1017,6 +1035,25 @@ class VPseudoTiedBinaryNoMask<VReg RetClass,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+class VPseudoTiedBinaryNoMaskTU<VReg RetClass,
+ DAGOperand Op2Class,
+ string Constraint> :
+ Pseudo<(outs RetClass:$rd),
+ (ins RetClass:$merge,
+ Op2Class:$rs1,
+ AVL:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasMergeOp = 0; // Merge is also rs2.
+ let HasDummyMask = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered>:
Pseudo<(outs),
@@ -1652,6 +1689,8 @@ multiclass VPseudoBinary<VReg RetClass,
let VLMul = MInfo.value in {
def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
Constraint>;
+ def "_" # MInfo.MX # "_TU" : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
+ Constraint>;
def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
Constraint>,
RISCVMaskedPseudo</*MaskOpIdx*/ 3>;
@@ -1681,6 +1720,8 @@ multiclass VPseudoBinaryEmul<VReg RetClass,
let VLMul = lmul.value in {
def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
Constraint>;
+ def "_" # lmul.MX # "_" # emul.MX # "_TU": VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
+ Constraint>;
def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
Constraint>;
}
@@ -1693,6 +1734,8 @@ multiclass VPseudoTiedBinary<VReg RetClass,
let VLMul = MInfo.value in {
def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
Constraint>;
+ def "_" # MInfo.MX # "_TIED_TU": VPseudoTiedBinaryNoMaskTU<RetClass, Op2Class,
+ Constraint>;
def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
Constraint>;
}
@@ -2820,14 +2863,14 @@ class VPatUnaryAnyMask<string intrinsic,
(mask_type VR:$rs2),
GPR:$vl, sew)>;
-class VPatBinaryNoMask<string intrinsic_name,
- string inst,
- ValueType result_type,
- ValueType op1_type,
- ValueType op2_type,
- int sew,
- VReg op1_reg_class,
- DAGOperand op2_kind> :
+class VPatBinaryM<string intrinsic_name,
+ string inst,
+ ValueType result_type,
+ ValueType op1_type,
+ ValueType op2_type,
+ int sew,
+ VReg op1_reg_class,
+ DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
@@ -2837,6 +2880,44 @@ class VPatBinaryNoMask<string intrinsic_name,
(op2_type op2_kind:$rs2),
GPR:$vl, sew)>;
+class VPatBinaryNoMaskTA<string intrinsic_name,
+ string inst,
+ ValueType result_type,
+ ValueType op1_type,
+ ValueType op2_type,
+ int sew,
+ VReg op1_reg_class,
+ DAGOperand op2_kind> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (result_type (undef)),
+ (op1_type op1_reg_class:$rs1),
+ (op2_type op2_kind:$rs2),
+ VLOpFrag)),
+ (!cast<Instruction>(inst)
+ (op1_type op1_reg_class:$rs1),
+ (op2_type op2_kind:$rs2),
+ GPR:$vl, sew)>;
+
+class VPatBinaryNoMaskTU<string intrinsic_name,
+ string inst,
+ ValueType result_type,
+ ValueType op1_type,
+ ValueType op2_type,
+ int sew,
+ VReg result_reg_class,
+ VReg op1_reg_class,
+ DAGOperand op2_kind> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (result_type result_reg_class:$merge),
+ (op1_type op1_reg_class:$rs1),
+ (op2_type op2_kind:$rs2),
+ VLOpFrag)),
+ (!cast<Instruction>(inst#"_TU")
+ (result_type result_reg_class:$merge),
+ (op1_type op1_reg_class:$rs1),
+ (op2_type op2_kind:$rs2),
+ GPR:$vl, sew)>;
+
// Same as above but source operands are swapped.
class VPatBinaryNoMaskSwapped<string intrinsic_name,
string inst,
@@ -2930,6 +3011,7 @@ class VPatTiedBinaryNoMask<string intrinsic_name,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (result_type (undef)),
(result_type result_reg_class:$rs1),
(op2_type op2_kind:$rs2),
VLOpFrag)),
@@ -2938,6 +3020,23 @@ class VPatTiedBinaryNoMask<string intrinsic_name,
(op2_type op2_kind:$rs2),
GPR:$vl, sew)>;
+class VPatTiedBinaryNoMaskTU<string intrinsic_name,
+ string inst,
+ ValueType result_type,
+ ValueType op2_type,
+ int sew,
+ VReg result_reg_class,
+ DAGOperand op2_kind> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$merge),
+ (op2_type op2_kind:$rs2),
+ VLOpFrag)),
+ (!cast<Instruction>(inst#"_TIED_TU")
+ (result_type result_reg_class:$merge),
+ (op2_type op2_kind:$rs2),
+ GPR:$vl, sew)>;
+
class VPatTiedBinaryMask<string intrinsic_name,
string inst,
ValueType result_type,
@@ -3147,7 +3246,7 @@ multiclass VPatNullaryM<string intrinsic, string inst> {
GPR:$vl, mti.Log2SEW)>;
}
-multiclass VPatBinary<string intrinsic,
+multiclass VPatBinaryM<string intrinsic,
string inst,
ValueType result_type,
ValueType op1_type,
@@ -3158,8 +3257,8 @@ multiclass VPatBinary<string intrinsic,
VReg op1_reg_class,
DAGOperand op2_kind>
{
- def : VPatBinaryNoMask<intrinsic, inst, result_type, op1_type, op2_type,
- sew, op1_reg_class, op2_kind>;
+ def : VPatBinaryM<intrinsic, inst, result_type, op1_type, op2_type,
+ sew, op1_reg_class, op2_kind>;
def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
mask_type, sew, result_reg_class, op1_reg_class,
op2_kind>;
@@ -3176,8 +3275,10 @@ multiclass VPatBinaryTA<string intrinsic,
VReg op1_reg_class,
DAGOperand op2_kind>
{
- def : VPatBinaryNoMask<intrinsic, inst, result_type, op1_type, op2_type,
- sew, op1_reg_class, op2_kind>;
+ def : VPatBinaryNoMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
+ sew, op1_reg_class, op2_kind>;
+ def : VPatBinaryNoMaskTU<intrinsic, inst, result_type, op1_type, op2_type,
+ sew, result_reg_class, op1_reg_class, op2_kind>;
def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
mask_type, sew, result_reg_class, op1_reg_class,
op2_kind>;
@@ -3349,9 +3450,9 @@ multiclass VPatBinaryV_VI<string intrinsic, string instruction,
multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
foreach mti = AllMasks in
- def : VPatBinaryNoMask<intrinsic, instruction # "_MM_" # mti.LMul.MX,
- mti.Mask, mti.Mask, mti.Mask,
- mti.Log2SEW, VR, VR>;
+ def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.LMul.MX,
+ mti.Mask, mti.Mask, mti.Mask,
+ mti.Log2SEW, VR, VR>;
}
multiclass VPatBinaryW_VV<string intrinsic, string instruction,
@@ -3387,10 +3488,17 @@ multiclass VPatBinaryW_WV<string intrinsic, string instruction,
def : VPatTiedBinaryNoMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
Wti.Vector, Vti.Vector,
Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
- let AddedComplexity = 1 in
+ def : VPatBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
+ Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
+ Wti.RegClass, Wti.RegClass, Vti.RegClass>;
+ let AddedComplexity = 1 in {
+ def : VPatTiedBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
+ Wti.Vector, Vti.Vector,
+ Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
Wti.Vector, Vti.Vector, Vti.Mask,
Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
+ }
def : VPatBinaryMaskTA<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
Vti.Log2SEW, Wti.RegClass,
@@ -3508,10 +3616,10 @@ multiclass VPatBinaryV_I<string intrinsic, string instruction> {
multiclass VPatBinaryM_VV<string intrinsic, string instruction,
list<VTypeInfo> vtilist> {
foreach vti = vtilist in
- defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
- vti.Mask, vti.Vector, vti.Vector, vti.Mask,
- vti.Log2SEW, VR,
- vti.RegClass, vti.RegClass>;
+ defm : VPatBinaryM<intrinsic, instruction # "_VV_" # vti.LMul.MX,
+ vti.Mask, vti.Vector, vti.Vector, vti.Mask,
+ vti.Log2SEW, VR,
+ vti.RegClass, vti.RegClass>;
}
multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
@@ -3527,20 +3635,20 @@ multiclass VPatBinaryM_VX<string intrinsic, string instruction,
list<VTypeInfo> vtilist> {
foreach vti = vtilist in {
defvar kind = "V"#vti.ScalarSuffix;
- defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
- vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
- vti.Log2SEW, VR,
- vti.RegClass, vti.ScalarRegClass>;
+ defm : VPatBinaryM<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
+ vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
+ vti.Log2SEW, VR,
+ vti.RegClass, vti.ScalarRegClass>;
}
}
multiclass VPatBinaryM_VI<string intrinsic, string instruction,
list<VTypeInfo> vtilist> {
foreach vti = vtilist in
- defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
- vti.Mask, vti.Vector, XLenVT, vti.Mask,
- vti.Log2SEW, VR,
- vti.RegClass, simm5>;
+ defm : VPatBinaryM<intrinsic, instruction # "_VI_" # vti.LMul.MX,
+ vti.Mask, vti.Vector, XLenVT, vti.Mask,
+ vti.Log2SEW, VR,
+ vti.RegClass, simm5>;
}
multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
@@ -4075,13 +4183,24 @@ foreach vti = AllIntegerVectors in {
// Occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
// to use a more complex splat sequence. Add the pattern for all VTs for
// consistency.
- def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$rs2),
+ def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector (undef)),
+ (vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
vti.RegClass:$rs2,
GPR:$vl,
vti.Log2SEW)>;
+ def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge),
+ (vti.Vector vti.RegClass:$rs2),
+ (vti.Vector vti.RegClass:$rs1),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_TU")
+ vti.RegClass:$merge,
+ vti.RegClass:$rs1,
+ vti.RegClass:$rs2,
+ GPR:$vl,
+ vti.Log2SEW)>;
def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
@@ -4098,7 +4217,8 @@ foreach vti = AllIntegerVectors in {
(XLenVT timm:$policy))>;
// Match VSUB with a small immediate to vadd.vi by negating the immediate.
- def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
+ def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)),
+ (vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
VLOpFrag)),
(!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
@@ -4684,7 +4804,8 @@ defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
foreach vti = AllIntegerVectors in {
// Emit shift by 1 as an add since it might be faster.
- def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$rs1),
+ def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef),
+ (vti.Vector vti.RegClass:$rs1),
(XLenVT 1), VLOpFrag)),
(!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
vti.RegClass:$rs1,
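
Illustrative only (written for an rv64 run, so the VL type is i64): with the *_TU pseudos and VPatBinaryNoMaskTU patterns above, an unmasked intrinsic whose passthru is a live value rather than undef should now select the tail-undisturbed form, which is what the unmasked-tu.ll checks below expect (vsetvli zero, a0, e8, mf8, tu, mu followed by vadd.vv v8, v9, v10).

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64)

define <vscale x 1 x i8> @sketch_vadd_tu(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %x,
                                         <vscale x 1 x i8> %y, i64 %vl) {
  ; Non-undef passthru: expect the _TU pseudo, i.e. the destination is tied to
  ; %passthru and a "tu" vsetvli is emitted.
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
      <vscale x 1 x i8> %passthru, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i64 %vl)
  ret <vscale x 1 x i8> %a
}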
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 7f5630456afec..cefdd4d3c6098 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1643,9 +1643,10 @@ def SDTRVVSlide : SDTypeProfile<1, 5, [
SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>
]>;
-def SDTRVVSlide1 : SDTypeProfile<1, 4, [
- SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisInt<0>, SDTCisVT<2, XLenVT>,
- SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, SDTCisVT<4, XLenVT>
+def SDTRVVSlide1 : SDTypeProfile<1, 5, [
+ SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
+ SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
+ SDTCisVT<5, XLenVT>
]>;
def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
@@ -1660,16 +1661,30 @@ foreach vti = AllIntegerVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>;
- def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rs1),
+ def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector undef),
+ (vti.Vector vti.RegClass:$rs1),
GPR:$rs2, (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX)
vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
- def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rs1),
+ def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rd),
+ (vti.Vector vti.RegClass:$rs1),
GPR:$rs2, (vti.Mask true_mask),
VLOpFrag)),
+ (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX#"_TU")
+ vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector undef),
+ (vti.Vector vti.RegClass:$rs1),
+ GPR:$rs2, (vti.Mask true_mask),
+ VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX)
vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rd),
+ (vti.Vector vti.RegClass:$rs1),
+ GPR:$rs2, (vti.Mask true_mask),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX#"_TU")
+ vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
}
foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
index 4216c5952eae8..3e370c4e9a0bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -23,6 +23,7 @@ define <vscale x 1 x i64> @access_fixed_object(i64 *%val) {
}
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -54,6 +55,7 @@ define <vscale x 1 x i64> @access_fixed_and_vector_objects(i64 *%val) {
%len = load i64, i64* %local
%a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %v1,
<vscale x 1 x i64> %v2,
i64 %len)
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
new file mode 100644
index 0000000000000..0c24fcf42965c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
@@ -0,0 +1,115 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+
+declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i32,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a3, a2, 1
+; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v9, v9, a0
+; CHECK-NEXT: vslide1down.vx v9, v9, a1
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4, i32 0)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a3, a2, 1
+; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v9, v9, a0
+; CHECK-NEXT: vslide1down.vx v9, v9, a1
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4, i32 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+
+; Fall back to mask-undisturbed vslide1 until InsertVSETVLI supports mask agnostic.
+define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a3, a2, 1
+; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v9, v9, a0
+; CHECK-NEXT: vslide1down.vx v9, v9, a1
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4, i32 2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+; Fall back to mask-undisturbed vslide1 until InsertVSETVLI supports mask agnostic.
+define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-NEXT: vslide1down.vx v8, v8, a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ <vscale x 1 x i1> %2,
+ i32 %3, i32 3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-NEXT: vslide1down.vx v8, v8, a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ <vscale x 1 x i1> undef,
+ i32 %2, i32 3)
+
+ ret <vscale x 1 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index 314c41296c72a..1fe6141f006ed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -79,11 +79,11 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
{
- %x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 %gvl)
+ %x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 %gvl)
%call = call signext i32 @puts(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0))
- %z = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %x, i32 %gvl)
+ %z = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %x, i32 %gvl)
ret <vscale x 1 x double> %z
}
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 %gvl)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 %gvl)
declare i32 @puts(i8*);
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
index c2e44e7e4936f..453aabd3466c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
@@ -79,11 +79,11 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-NEXT: addi sp, sp, 32
; SPILL-O2-NEXT: ret
{
- %x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %gvl)
+ %x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %gvl)
%call = call signext i32 @puts(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0))
- %z = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %x, i64 %gvl)
+ %z = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %x, i64 %gvl)
ret <vscale x 1 x double> %z
}
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %gvl)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %gvl)
declare i32 @puts(i8*);
diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
index 3d314b1779a3e..21bd0ee2a6bdb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
-; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
-; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefix=RV64
declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
<vscale x 1 x i8>,
@@ -118,3 +118,2088 @@ entry:
ret <vscale x 1 x i8> %a
}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vaadd.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vaadd.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vaaddu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vaaddu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vadd.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vadd.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vand.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vand.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vasub.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vasub.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vasubu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vasubu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vdiv.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vdiv.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vdivu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vdivu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfadd.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfadd.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfdiv.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfdiv.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfmax.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfmax.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfmin.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfmin.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfmul.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfmul.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfrdiv.vf v8, v9, fa0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfrdiv.vf v8, v9, fa0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfsgnjn.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfsgnjn.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfsgnjx.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfsgnjx.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfrsub.vf v8, v9, fa0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfrsub.vf v8, v9, fa0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfslide1down.vf v8, v9, fa0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfslide1down.vf v8, v9, fa0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfslide1up.vf v8, v9, fa0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfslide1up.vf v8, v9, fa0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwsub.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwsub.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwsub.wv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwsub.wv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x half>,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vl4re16.v v24, (a0)
+; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; RV32-NEXT: vfwsub.wv v8, v16, v24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vl4re16.v v24, (a0)
+; RV64-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; RV64-NEXT: vfwsub.wv v8, v16, v24
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwmul.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwmul.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwadd.wv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwadd.wv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwadd.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwadd.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfsub.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfsub.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu
+; RV32-NEXT: vmv1r.v v10, v8
+; RV32-NEXT: vslide1down.vx v10, v9, a0
+; RV32-NEXT: vslide1down.vx v8, v10, a1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vslide1down.vx v8, v9, a0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu
+; RV32-NEXT: vmv1r.v v10, v8
+; RV32-NEXT: vslide1up.vx v10, v9, a1
+; RV32-NEXT: vslide1up.vx v8, v10, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vslide1up.vx v8, v9, a0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vmax.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vmax.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vmaxu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vmaxu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vmin.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vmin.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vminu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vminu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vmul.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vmul.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vmulh.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vmulh.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vmulhsu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vmulhsu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vmulhu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vmulhu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vnclip.wv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vnclip.wv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vnclipu.wv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vnclipu.wv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vnsra.wv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vnsra.wv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vnsrl.wv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vnsrl.wv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vor.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vor.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vrem.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vrem.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vrgather.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vrgather.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; RV32-NEXT: vrgather.vx v8, v9, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; RV64-NEXT: vrgather.vx v8, v9, a0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vrgatherei16.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vrgatherei16.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT: vsub.vv v8, v10, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vrsub.vx v8, v9, a0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT: vsadd.vv v8, v9, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vsadd.vx v8, v9, a0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vsaddu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vsaddu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vsll.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vsll.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vsmul.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vsmul.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT: vsmul.vv v8, v9, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vsmul.vx v8, v9, a0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vsra.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vsra.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vsrl.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vsrl.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vssra.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vssra.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vssrl.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vssrl.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vssub.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vssub.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vssubu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vssubu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT: vssub.vv v8, v9, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vssub.vx v8, v9, a0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT: vssubu.vv v8, v9, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vssubu.vx v8, v9, a0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vsub.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vsub.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwadd.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwadd.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwadd.wv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwadd.wv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwaddu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwaddu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwmul.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwmul.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwmulu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwmulu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwmulsu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwmulsu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwsub.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwsub.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwsub.wv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwsub.wv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwsub.wv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwsub.wv v8, v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwsubu.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwsubu.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vwsubu.wv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vwsubu.wv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vxor.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vxor.vv v8, v9, v10
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
index 13eb36dd1a251..2d9f005c0d373 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
index 4d692eb089c3f..76d556d6be40b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
index 4e796ebd4a32b..5d8b24ed84866 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
index 3f9ed340dede9..4fad38c164c72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
index 57db5175df548..4436a39dac78d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
@@ -3,6 +3,7 @@
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
index 4e7b12753aeb2..5f39548b45cc2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -46,6 +48,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -58,6 +61,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -90,6 +94,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -102,6 +107,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -134,6 +140,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -146,6 +153,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -178,6 +186,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -190,6 +199,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -222,6 +232,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -234,6 +245,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -266,6 +278,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -278,6 +291,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -311,6 +325,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -323,6 +338,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -355,6 +371,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -367,6 +384,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -399,6 +417,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -411,6 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -443,6 +463,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -455,6 +476,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -487,6 +509,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -499,6 +522,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -531,6 +555,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -543,6 +568,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -576,6 +602,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -588,6 +615,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -620,6 +648,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -632,6 +661,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -664,6 +694,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -676,6 +707,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -708,6 +740,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -720,6 +753,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -752,6 +786,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -764,6 +799,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -797,6 +833,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -809,6 +846,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -841,6 +879,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -853,6 +892,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -885,6 +925,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -897,6 +938,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -929,6 +971,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -941,6 +984,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -974,6 +1018,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -986,6 +1031,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1018,6 +1064,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1030,6 +1077,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1062,6 +1110,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1074,6 +1123,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1106,6 +1156,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1118,6 +1169,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1150,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1162,6 +1215,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1194,6 +1248,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1206,6 +1261,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1238,6 +1294,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1250,6 +1307,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1282,6 +1340,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1294,6 +1353,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1326,6 +1386,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1338,6 +1399,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1370,6 +1432,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1382,6 +1445,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1414,6 +1478,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1426,6 +1491,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1458,6 +1524,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1470,6 +1537,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1502,6 +1570,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1514,6 +1583,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1546,6 +1616,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1558,6 +1629,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1590,6 +1662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1602,6 +1675,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1634,6 +1708,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1646,6 +1721,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1678,6 +1754,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1690,6 +1767,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1722,6 +1800,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1734,6 +1813,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1766,6 +1846,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1784,6 +1865,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1822,6 +1904,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1840,6 +1923,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1878,6 +1962,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1896,6 +1981,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1934,6 +2020,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1952,6 +2039,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
@@ -1997,6 +2085,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i32 %1)
@@ -2029,6 +2118,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i32 %1)
@@ -2061,6 +2151,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i32 %1)
@@ -2093,6 +2184,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i32 %1)
@@ -2125,6 +2217,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i32 %1)
@@ -2157,6 +2250,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i32 %1)
@@ -2189,6 +2283,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 -9,
i32 %1)
@@ -2221,6 +2316,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i32 %1)
@@ -2253,6 +2349,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i32 %1)
@@ -2285,6 +2382,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i32 %1)
@@ -2317,6 +2415,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i32 %1)
@@ -2349,6 +2448,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i32 %1)
@@ -2381,6 +2481,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i32 %1)
@@ -2413,6 +2514,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2445,6 +2547,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2477,6 +2580,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2509,6 +2613,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2541,6 +2646,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2573,6 +2679,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
@@ -2605,6 +2712,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
@@ -2637,6 +2745,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
@@ -2669,6 +2778,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
index a56bf1bf97e6e..5f1bd6842cc29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
index 89b9dbcb57c0b..85041080b20cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i32 %1)
@@ -2073,6 +2162,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i32 %1)
@@ -2105,6 +2195,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i32 %1)
@@ -2137,6 +2228,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i32 %1)
@@ -2169,6 +2261,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i32 %1)
@@ -2201,6 +2294,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i32 %1)
@@ -2233,6 +2327,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i32 %1)
@@ -2265,6 +2360,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i32 %1)
@@ -2297,6 +2393,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i32 %1)
@@ -2329,6 +2426,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i32 %1)
@@ -2361,6 +2459,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i32 %1)
@@ -2393,6 +2492,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i32 %1)
@@ -2425,6 +2525,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i32 %1)
@@ -2457,6 +2558,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2489,6 +2591,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2521,6 +2624,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2553,6 +2657,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2585,6 +2690,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2617,6 +2723,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
@@ -2649,6 +2756,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
@@ -2681,6 +2789,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
@@ -2713,6 +2822,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
index 39a1d9bc4a081..2dfd3332001b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
index d1cfc9704c1e7..fb1f8927d85b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
index 6ad70daafb25d..595c05baa7a0f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
index 4707aff206d82..1845da27c7b49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
index 74d4690a8c8db..7b9824e5592a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
index e062069311956..bfc267bfbb276 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
index 01f1100045567..349052d118587 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
index b86b902962ea5..da9582452bbb1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
index 7efea7cfe3fbf..d5355e87a3c1c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
index 041580b2b49d8..817c2514909f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -410,6 +428,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -422,6 +441,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -455,6 +475,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
iXLen);
@@ -467,6 +488,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x double>,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
iXLen %2)
@@ -682,6 +712,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -694,6 +725,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -727,6 +759,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -739,6 +772,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -772,6 +806,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -784,6 +819,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -817,6 +853,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -829,6 +866,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -862,6 +900,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -874,6 +913,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -907,6 +947,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -919,6 +960,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -952,6 +994,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -964,6 +1007,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -997,6 +1041,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -1009,6 +1054,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -1042,6 +1088,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -1054,6 +1101,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -1087,6 +1135,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -1099,6 +1148,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -1132,6 +1182,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -1144,6 +1195,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -1177,6 +1229,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -1189,6 +1242,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -1222,6 +1276,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -1234,6 +1289,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -1267,6 +1323,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -1279,6 +1336,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -1312,6 +1370,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -1324,6 +1383,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll
index 0145f2ad764ec..c0e4b5e49b6a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -410,6 +428,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -422,6 +441,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -455,6 +475,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
iXLen);
@@ -467,6 +488,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x double>,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
iXLen %2)
@@ -682,6 +712,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -694,6 +725,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -727,6 +759,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -739,6 +772,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -772,6 +806,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -784,6 +819,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -817,6 +853,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -829,6 +866,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -862,6 +900,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -874,6 +913,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -907,6 +947,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -919,6 +960,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -952,6 +994,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -964,6 +1007,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -997,6 +1041,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -1009,6 +1054,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -1042,6 +1088,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -1054,6 +1101,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -1087,6 +1135,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -1099,6 +1148,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -1132,6 +1182,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -1144,6 +1195,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -1177,6 +1229,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -1189,6 +1242,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -1222,6 +1276,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -1234,6 +1289,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -1267,6 +1323,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -1279,6 +1336,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -1312,6 +1370,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -1324,6 +1383,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
index 446981928b6cf..cb9c3cd0706ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -410,6 +428,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -422,6 +441,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -455,6 +475,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
iXLen);
@@ -467,6 +488,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x double>,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
iXLen %2)
@@ -682,6 +712,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -694,6 +725,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -727,6 +759,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -739,6 +772,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -772,6 +806,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -784,6 +819,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -817,6 +853,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -829,6 +866,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -862,6 +900,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -874,6 +913,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -907,6 +947,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -919,6 +960,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -952,6 +994,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -964,6 +1007,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -997,6 +1041,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -1009,6 +1054,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -1042,6 +1088,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -1054,6 +1101,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -1087,6 +1135,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -1099,6 +1148,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -1132,6 +1182,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -1144,6 +1195,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -1177,6 +1229,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -1189,6 +1242,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -1222,6 +1276,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -1234,6 +1289,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -1267,6 +1323,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -1279,6 +1336,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -1312,6 +1370,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -1324,6 +1383,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
index e151e9fb695de..4d6da26a75cf7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -410,6 +428,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -422,6 +441,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -455,6 +475,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
iXLen);
@@ -467,6 +488,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x double>,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
iXLen %2)
@@ -682,6 +712,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -694,6 +725,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -727,6 +759,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -739,6 +772,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -772,6 +806,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -784,6 +819,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -817,6 +853,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -829,6 +866,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -862,6 +900,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -874,6 +913,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -907,6 +947,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -919,6 +960,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -952,6 +994,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -964,6 +1007,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -997,6 +1041,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -1009,6 +1054,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -1042,6 +1088,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -1054,6 +1101,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -1087,6 +1135,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -1099,6 +1148,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -1132,6 +1182,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -1144,6 +1195,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -1177,6 +1229,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -1189,6 +1242,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -1222,6 +1276,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -1234,6 +1289,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -1267,6 +1323,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -1279,6 +1336,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -1312,6 +1370,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -1324,6 +1383,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
index 0f4c738025617..2301057994d7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -410,6 +428,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -422,6 +441,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -455,6 +475,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
iXLen);
@@ -467,6 +488,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x double>,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
iXLen %2)
@@ -682,6 +712,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -694,6 +725,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -727,6 +759,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -739,6 +772,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -772,6 +806,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -784,6 +819,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -817,6 +853,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -829,6 +866,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -862,6 +900,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -874,6 +913,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -907,6 +947,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -919,6 +960,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -952,6 +994,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -964,6 +1007,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -997,6 +1041,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -1009,6 +1054,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -1042,6 +1088,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -1054,6 +1101,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -1087,6 +1135,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -1099,6 +1148,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -1132,6 +1182,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -1144,6 +1195,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -1177,6 +1229,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -1189,6 +1242,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -1222,6 +1276,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -1234,6 +1289,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -1267,6 +1323,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -1279,6 +1336,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -1312,6 +1370,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -1324,6 +1383,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
index 58dc39b995055..a872aad5bf0f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -274,6 +286,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -286,6 +299,7 @@ define <vscale x 1 x float> @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -319,6 +333,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -331,6 +346,7 @@ define <vscale x 2 x float> @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -364,6 +380,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -376,6 +393,7 @@ define <vscale x 4 x float> @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -409,6 +427,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -421,6 +440,7 @@ define <vscale x 8 x float> @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -454,6 +474,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -466,6 +487,7 @@ define <vscale x 16 x float> @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -511,6 +534,7 @@ define <vscale x 1 x double> @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -556,6 +581,7 @@ define <vscale x 2 x double> @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -601,6 +628,7 @@ define <vscale x 4 x double> @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -646,6 +675,7 @@ define <vscale x 8 x double> @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
index 3fb281562088c..d1cabf11698e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -274,6 +286,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -286,6 +299,7 @@ define <vscale x 1 x float> @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -319,6 +333,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -331,6 +346,7 @@ define <vscale x 2 x float> @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -364,6 +380,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -376,6 +393,7 @@ define <vscale x 4 x float> @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -409,6 +427,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -421,6 +440,7 @@ define <vscale x 8 x float> @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -454,6 +474,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -466,6 +487,7 @@ define <vscale x 16 x float> @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -511,6 +534,7 @@ define <vscale x 1 x double> @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -556,6 +581,7 @@ define <vscale x 2 x double> @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -601,6 +628,7 @@ define <vscale x 4 x double> @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -646,6 +675,7 @@ define <vscale x 8 x double> @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
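
; NOTE (not part of the patch): every declaration and call site updated in these test
; files follows the same shape as the vfrsub hunks above. A minimal sketch of the
; post-patch form is shown below for the nxv8f16 case; the value names %v, %s and %vl
; are illustrative placeholders and do not appear in the test file itself.
;
;   declare <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
;     <vscale x 8 x half>,        ; new leading passthru operand added by this patch
;     <vscale x 8 x half>,        ; vector operand
;     half,                       ; scalar operand
;     iXLen)                      ; vl
;
;   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
;     <vscale x 8 x half> undef,  ; the nomask tests pass undef as the passthru
;     <vscale x 8 x half> %v,
;     half %s,
;     iXLen %vl)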
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
index 65a5592775cb5..568d0e83a635d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -410,6 +428,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -422,6 +441,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -455,6 +475,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
iXLen);
@@ -467,6 +488,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x double>,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
iXLen %2)
@@ -682,6 +712,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -694,6 +725,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -727,6 +759,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -739,6 +772,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -772,6 +806,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -784,6 +819,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -817,6 +853,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -829,6 +866,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -862,6 +900,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -874,6 +913,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -907,6 +947,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -919,6 +960,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -952,6 +994,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -964,6 +1007,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -997,6 +1041,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -1009,6 +1054,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -1042,6 +1088,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -1054,6 +1101,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -1087,6 +1135,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -1099,6 +1148,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -1132,6 +1182,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -1144,6 +1195,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -1177,6 +1229,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -1189,6 +1242,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -1222,6 +1276,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -1234,6 +1289,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -1267,6 +1323,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -1279,6 +1336,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -1312,6 +1370,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -1324,6 +1383,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
index f16c8a6db1fb9..4a1171b0ffd3e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -410,6 +428,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -422,6 +441,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -455,6 +475,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
iXLen);
@@ -467,6 +488,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x double>,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
iXLen %2)
@@ -682,6 +712,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -694,6 +725,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -727,6 +759,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -739,6 +772,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -772,6 +806,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -784,6 +819,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -817,6 +853,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -829,6 +866,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -862,6 +900,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -874,6 +913,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -907,6 +947,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -919,6 +960,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -952,6 +994,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -964,6 +1007,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -997,6 +1041,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -1009,6 +1054,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -1042,6 +1088,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -1054,6 +1101,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -1087,6 +1135,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -1099,6 +1148,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -1132,6 +1182,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -1144,6 +1195,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -1177,6 +1229,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -1189,6 +1242,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -1222,6 +1276,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -1234,6 +1289,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -1267,6 +1323,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -1279,6 +1336,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -1312,6 +1370,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -1324,6 +1383,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
index edfd578ce8aa0..cd1f2d8d6b742 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -410,6 +428,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -422,6 +441,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -455,6 +475,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
iXLen);
@@ -467,6 +488,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x double>,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
iXLen %2)
@@ -682,6 +712,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -694,6 +725,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -727,6 +759,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -739,6 +772,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -772,6 +806,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -784,6 +819,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -817,6 +853,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -829,6 +866,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -862,6 +900,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -874,6 +913,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -907,6 +947,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -919,6 +960,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -952,6 +994,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -964,6 +1007,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -997,6 +1041,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -1009,6 +1054,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -1042,6 +1088,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -1054,6 +1101,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -1087,6 +1135,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -1099,6 +1148,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -1132,6 +1182,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -1144,6 +1195,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -1177,6 +1229,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -1189,6 +1242,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -1222,6 +1276,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -1234,6 +1289,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -1267,6 +1323,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -1279,6 +1336,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -1312,6 +1370,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -1324,6 +1383,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
index 6cbba483a8d9f..2bcf3cfb6b68f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -274,6 +286,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -286,6 +299,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -319,6 +333,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -331,6 +346,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -364,6 +380,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -376,6 +393,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -409,6 +427,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -421,6 +440,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -454,6 +474,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -466,6 +487,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -511,6 +534,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -556,6 +581,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -601,6 +628,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -646,6 +675,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
index 695cf7aab3f6c..b70fdec7716d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -17,6 +18,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -50,6 +52,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -63,6 +66,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -96,6 +100,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -109,6 +114,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -142,6 +148,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -155,6 +162,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -188,6 +196,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -201,6 +210,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -234,6 +244,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -247,6 +258,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -280,6 +292,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -293,6 +306,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -326,6 +340,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -339,6 +354,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -372,6 +388,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -385,6 +402,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -418,6 +436,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -431,6 +450,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -464,6 +484,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -477,6 +498,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -510,6 +532,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -523,6 +546,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -556,6 +580,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -569,6 +594,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -602,6 +628,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -615,6 +642,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -648,6 +676,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -661,6 +690,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
index 645fb340ffa30..e627b8f8b69e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
iXLen);
@@ -241,6 +252,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -410,6 +428,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -422,6 +441,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -455,6 +475,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
iXLen);
@@ -467,6 +488,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x double>,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
iXLen %2)
@@ -682,6 +712,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
half,
iXLen);
@@ -694,6 +725,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -727,6 +759,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
half,
iXLen);
@@ -739,6 +772,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -772,6 +806,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
half,
iXLen);
@@ -784,6 +819,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -817,6 +853,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
half,
iXLen);
@@ -829,6 +866,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -862,6 +900,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
half,
iXLen);
@@ -874,6 +913,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -907,6 +947,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
half,
iXLen);
@@ -919,6 +960,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
half %1,
iXLen %2)
@@ -952,6 +994,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
float,
iXLen);
@@ -964,6 +1007,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -997,6 +1041,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
float,
iXLen);
@@ -1009,6 +1054,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -1042,6 +1088,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
float,
iXLen);
@@ -1054,6 +1101,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -1087,6 +1135,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
float,
iXLen);
@@ -1099,6 +1148,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
@@ -1132,6 +1182,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
float,
iXLen);
@@ -1144,6 +1195,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
float %1,
iXLen %2)
@@ -1177,6 +1229,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
double,
iXLen);
@@ -1189,6 +1242,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
double %1,
iXLen %2)
@@ -1222,6 +1276,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
double,
iXLen);
@@ -1234,6 +1289,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
double %1,
iXLen %2)
@@ -1267,6 +1323,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
double,
iXLen);
@@ -1279,6 +1336,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
double %1,
iXLen %2)
@@ -1312,6 +1370,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
double,
iXLen);
@@ -1324,6 +1383,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
double %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
index 541f2b8564f28..5910e790dec85 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -17,6 +18,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -50,6 +52,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
+ <vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -63,6 +66,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -96,6 +100,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
+ <vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -109,6 +114,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -142,6 +148,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
+ <vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -155,6 +162,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -188,6 +196,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
+ <vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -201,6 +210,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -234,6 +244,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
+ <vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -247,6 +258,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -280,6 +292,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
+ <vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -293,6 +306,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -326,6 +340,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
+ <vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -339,6 +354,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -372,6 +388,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
+ <vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -385,6 +402,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -418,6 +436,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
+ <vscale x 1 x float>,
<vscale x 1 x half>,
half,
iXLen);
@@ -431,6 +450,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -464,6 +484,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
+ <vscale x 2 x float>,
<vscale x 2 x half>,
half,
iXLen);
@@ -477,6 +498,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -510,6 +532,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
+ <vscale x 4 x float>,
<vscale x 4 x half>,
half,
iXLen);
@@ -523,6 +546,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -556,6 +580,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
+ <vscale x 8 x float>,
<vscale x 8 x half>,
half,
iXLen);
@@ -569,6 +594,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -602,6 +628,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
+ <vscale x 16 x float>,
<vscale x 16 x half>,
half,
iXLen);
@@ -615,6 +642,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -648,6 +676,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
+ <vscale x 1 x double>,
<vscale x 1 x float>,
float,
iXLen);
@@ -661,6 +690,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -694,6 +724,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
+ <vscale x 2 x double>,
<vscale x 2 x float>,
float,
iXLen);
@@ -707,6 +738,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -740,6 +772,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
+ <vscale x 4 x double>,
<vscale x 4 x float>,
float,
iXLen);
@@ -753,6 +786,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -786,6 +820,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
+ <vscale x 8 x double>,
<vscale x 8 x float>,
float,
iXLen);
@@ -799,6 +834,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
index 28cdfbf621b3d..1027e08ffce33 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -230,6 +240,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x float>,
iXLen);
@@ -242,6 +253,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
half,
iXLen);
@@ -423,6 +442,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
half %1,
iXLen %2)
@@ -456,6 +476,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
half,
iXLen);
@@ -468,6 +489,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
half %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
half,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
half %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
half,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
half %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
half,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
half %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
float,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
float %1,
iXLen %2)
@@ -681,6 +711,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
float,
iXLen);
@@ -693,6 +724,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
float %1,
iXLen %2)
@@ -726,6 +758,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
float,
iXLen);
@@ -738,6 +771,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
float %1,
iXLen %2)
@@ -771,6 +805,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
float,
iXLen);
@@ -783,6 +818,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
float %1,
iXLen %2)
@@ -1130,6 +1166,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %1,
<vscale x 1 x half> %0,
iXLen %2)
@@ -1146,6 +1183,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %1,
<vscale x 2 x half> %0,
iXLen %2)
@@ -1162,6 +1200,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %1,
<vscale x 4 x half> %0,
iXLen %2)
@@ -1178,6 +1217,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %1,
<vscale x 8 x half> %0,
iXLen %2)
@@ -1194,6 +1234,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %1,
<vscale x 1 x float> %0,
iXLen %2)
@@ -1210,6 +1251,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %1,
<vscale x 2 x float> %0,
iXLen %2)
@@ -1226,6 +1268,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %1,
<vscale x 4 x float> %0,
iXLen %2)
@@ -1242,6 +1285,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %1,
<vscale x 8 x float> %0,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
index b1ec8464047e1..017a9c65f034d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -17,6 +18,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -50,6 +52,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
+ <vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -63,6 +66,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -96,6 +100,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
+ <vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -109,6 +114,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -142,6 +148,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
+ <vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -155,6 +162,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -188,6 +196,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
+ <vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -201,6 +210,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -234,6 +244,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
+ <vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -247,6 +258,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -280,6 +292,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
+ <vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -293,6 +306,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -326,6 +340,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
+ <vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -339,6 +354,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -372,6 +388,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
+ <vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -385,6 +402,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -418,6 +436,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
+ <vscale x 1 x float>,
<vscale x 1 x half>,
half,
iXLen);
@@ -431,6 +450,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -464,6 +484,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
+ <vscale x 2 x float>,
<vscale x 2 x half>,
half,
iXLen);
@@ -477,6 +498,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -510,6 +532,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
+ <vscale x 4 x float>,
<vscale x 4 x half>,
half,
iXLen);
@@ -523,6 +546,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -556,6 +580,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
+ <vscale x 8 x float>,
<vscale x 8 x half>,
half,
iXLen);
@@ -569,6 +594,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -602,6 +628,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
+ <vscale x 16 x float>,
<vscale x 16 x half>,
half,
iXLen);
@@ -615,6 +642,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -648,6 +676,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
+ <vscale x 1 x double>,
<vscale x 1 x float>,
float,
iXLen);
@@ -661,6 +690,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -694,6 +724,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
+ <vscale x 2 x double>,
<vscale x 2 x float>,
float,
iXLen);
@@ -707,6 +738,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -740,6 +772,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
+ <vscale x 4 x double>,
<vscale x 4 x float>,
float,
iXLen);
@@ -753,6 +786,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -786,6 +820,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
+ <vscale x 8 x double>,
<vscale x 8 x float>,
float,
iXLen);
@@ -799,6 +834,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
index 916abcae0de0d..c7ed2108f4d58 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen);
@@ -17,6 +18,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -50,6 +52,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
+ <vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen);
@@ -63,6 +66,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -96,6 +100,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
+ <vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen);
@@ -109,6 +114,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -142,6 +148,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
+ <vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen);
@@ -155,6 +162,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -188,6 +196,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
+ <vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen);
@@ -201,6 +210,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -234,6 +244,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
+ <vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen);
@@ -247,6 +258,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -280,6 +292,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
+ <vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen);
@@ -293,6 +306,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -326,6 +340,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
+ <vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen);
@@ -339,6 +354,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -372,6 +388,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
+ <vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen);
@@ -385,6 +402,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -418,6 +436,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
+ <vscale x 1 x float>,
<vscale x 1 x half>,
half,
iXLen);
@@ -431,6 +450,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x half> %0,
half %1,
iXLen %2)
@@ -464,6 +484,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
+ <vscale x 2 x float>,
<vscale x 2 x half>,
half,
iXLen);
@@ -477,6 +498,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x half> %0,
half %1,
iXLen %2)
@@ -510,6 +532,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
+ <vscale x 4 x float>,
<vscale x 4 x half>,
half,
iXLen);
@@ -523,6 +546,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x half> %0,
half %1,
iXLen %2)
@@ -556,6 +580,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
+ <vscale x 8 x float>,
<vscale x 8 x half>,
half,
iXLen);
@@ -569,6 +594,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x half> %0,
half %1,
iXLen %2)
@@ -602,6 +628,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
+ <vscale x 16 x float>,
<vscale x 16 x half>,
half,
iXLen);
@@ -615,6 +642,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x half> %0,
half %1,
iXLen %2)
@@ -648,6 +676,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
+ <vscale x 1 x double>,
<vscale x 1 x float>,
float,
iXLen);
@@ -661,6 +690,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x float> %0,
float %1,
iXLen %2)
@@ -694,6 +724,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
+ <vscale x 2 x double>,
<vscale x 2 x float>,
float,
iXLen);
@@ -707,6 +738,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x float> %0,
float %1,
iXLen %2)
@@ -740,6 +772,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
+ <vscale x 4 x double>,
<vscale x 4 x float>,
float,
iXLen);
@@ -753,6 +786,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x float> %0,
float %1,
iXLen %2)
@@ -786,6 +820,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
+ <vscale x 8 x double>,
<vscale x 8 x float>,
float,
iXLen);
@@ -799,6 +834,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x float> %0,
float %1,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
index b5d008c3e1ed5..1b20551ceb9c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x half>,
iXLen);
@@ -16,6 +17,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
iXLen %2)
@@ -49,6 +51,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x half>,
iXLen);
@@ -61,6 +64,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
iXLen %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x half>,
iXLen);
@@ -106,6 +111,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
iXLen %2)
@@ -139,6 +145,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x half>,
iXLen);
@@ -151,6 +158,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
iXLen %2)
@@ -184,6 +192,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x half>,
iXLen);
@@ -196,6 +205,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
iXLen %2)
@@ -230,6 +240,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x float>,
iXLen);
@@ -242,6 +253,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
iXLen %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x float>,
iXLen);
@@ -287,6 +300,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
iXLen %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x float>,
iXLen);
@@ -332,6 +347,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
iXLen %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x float>,
iXLen);
@@ -377,6 +394,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
iXLen %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
half,
iXLen);
@@ -423,6 +442,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
half %1,
iXLen %2)
@@ -456,6 +476,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
half,
iXLen);
@@ -468,6 +489,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
half %1,
iXLen %2)
@@ -501,6 +523,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
half,
iXLen);
@@ -513,6 +536,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
half %1,
iXLen %2)
@@ -546,6 +570,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
half,
iXLen);
@@ -558,6 +583,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
half %1,
iXLen %2)
@@ -591,6 +617,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
half,
iXLen);
@@ -603,6 +630,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
half %1,
iXLen %2)
@@ -636,6 +664,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
float,
iXLen);
@@ -648,6 +677,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
float %1,
iXLen %2)
@@ -681,6 +711,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
float,
iXLen);
@@ -693,6 +724,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
float %1,
iXLen %2)
@@ -726,6 +758,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
float,
iXLen);
@@ -738,6 +771,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
float %1,
iXLen %2)
@@ -771,6 +805,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
float,
iXLen);
@@ -783,6 +818,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
float %1,
iXLen %2)
@@ -1130,6 +1166,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %1,
<vscale x 1 x half> %0,
iXLen %2)
@@ -1146,6 +1183,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %1,
<vscale x 2 x half> %0,
iXLen %2)
@@ -1162,6 +1200,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %1,
<vscale x 4 x half> %0,
iXLen %2)
@@ -1178,6 +1217,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %1,
<vscale x 8 x half> %0,
iXLen %2)
@@ -1194,6 +1234,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %1,
<vscale x 1 x float> %0,
iXLen %2)
@@ -1210,6 +1251,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %1,
<vscale x 2 x float> %0,
iXLen %2)
@@ -1226,6 +1268,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %1,
<vscale x 4 x float> %0,
iXLen %2)
@@ -1242,6 +1285,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %1,
<vscale x 8 x float> %0,
iXLen %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
index 1d51eb6549cff..2f431ab0195c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
index da40c60ff8a96..49f63fab0555c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
index af080efa3358b..9523f1980ceb6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
index bc63daea21f69..caaeaf6ddfaef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
index ebc304b18f760..54c2463415254 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
index febe87403cdda..4cc67d660fdb9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
index ec3cc011bd6e1..5f024c9b8a5f5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
index f3b9100c38a62..93881f6cfd54b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
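
All of the vmin/vminu/vmul hunks above and below follow the same pattern: each intrinsic declaration gains a leading vector operand of the result type, and the unmasked test calls pass undef for it. A minimal annotated sketch of the updated shape, using the nxv1i64 vminu variant from the file just above (the ';' comments are illustrative labels added here, not part of the checked-in tests):

declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,   ; new leading operand, passed as undef in these tests
  <vscale x 1 x i64>,   ; first source vector
  <vscale x 1 x i64>,   ; second source vector
  i64)                  ; vector length

%a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
  <vscale x 1 x i64> undef,
  <vscale x 1 x i64> %0,
  <vscale x 1 x i64> %1,
  i64 %2)
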
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
index cf0c1da69743c..6727bb585e2a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
index adf746657527d..28c2d775e1bc7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
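
[editor's sketch, not part of the patch] The remaining test-file hunks (vmulh-rv32.ll, vmulh-rv64.ll, vmulhsu-rv32.ll, and so on below) repeat the same mechanical change already visible above, only with different element types, LMULs, and XLEN. A condensed before/after of that pattern, using the nxv1i8 vmul overload taken from these tests, may help when skimming; the operand comments are added here for illustration only:
  ; Before: the unmasked intrinsic took only the two source operands and vl.
  declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8>,   ; op1
    <vscale x 1 x i8>,   ; op2
    i64);                ; vl
  ; After: a leading passthru operand is added, and the unmasked tests pass undef for it.
  declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8>,   ; passthru (new)
    <vscale x 1 x i8>,   ; op1
    <vscale x 1 x i8>,   ; op2
    i64);                ; vl
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,   ; passthru
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)
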
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
index 51e254f5fb352..4bd0f83699203 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
index 9242b6220237d..2fd93845e4be2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
index 068a53f994fe1..28afafa2e0a38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8(<vscale x 64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
index 328c9098fb4d0..fd7f4a120ccff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8(<vscale x 64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
index 8c04d78a90602..a20149f9b23d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
index 48ce2b21aacb6..b3b7352810971 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
index 3dfadcc3f6c46..9452abe171184 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
@@ -150,6 +157,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -183,6 +191,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
@@ -196,6 +205,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
@@ -242,6 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
@@ -287,6 +300,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
@@ -332,6 +347,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
@@ -378,6 +395,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
@@ -424,6 +443,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -457,6 +477,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
@@ -470,6 +491,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -503,6 +525,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
@@ -515,6 +538,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -548,6 +572,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
@@ -561,6 +586,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -594,6 +620,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
@@ -607,6 +634,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -640,6 +668,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
@@ -653,6 +682,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -686,6 +716,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
i32,
i32);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -731,6 +763,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
i32,
i32);
@@ -743,6 +776,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -776,6 +810,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
i32,
i32);
@@ -788,6 +823,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -821,6 +857,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
i32,
i32);
@@ -834,6 +871,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -867,6 +905,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
i32,
i32);
@@ -880,6 +919,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -913,6 +953,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
i32,
i32);
@@ -926,6 +967,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -959,6 +1001,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
i32,
i32);
@@ -971,6 +1014,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1004,6 +1048,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1016,6 +1061,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1049,6 +1095,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1062,6 +1109,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1095,6 +1143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1108,6 +1157,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1141,6 +1191,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1154,6 +1205,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1187,6 +1239,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1199,6 +1252,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1232,6 +1286,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1245,6 +1300,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1278,6 +1334,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1291,6 +1348,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1324,6 +1382,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1337,6 +1396,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -1409,6 +1470,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -1441,6 +1503,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -1474,6 +1537,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -1507,6 +1571,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -1540,6 +1605,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -1572,6 +1638,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -1604,6 +1671,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -1637,6 +1705,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -1670,6 +1739,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -1703,6 +1773,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -1735,6 +1806,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
@@ -1768,6 +1840,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
@@ -1801,6 +1874,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
@@ -1834,6 +1908,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
index dc0f3f879e370..df8def1426f02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
@@ -150,6 +157,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -183,6 +191,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
@@ -196,6 +205,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
@@ -242,6 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
@@ -287,6 +300,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
@@ -332,6 +347,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
@@ -378,6 +395,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
@@ -424,6 +443,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -457,6 +477,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
@@ -470,6 +491,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -503,6 +525,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
@@ -515,6 +538,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -548,6 +572,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
@@ -561,6 +586,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -594,6 +620,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
@@ -607,6 +634,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -640,6 +668,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
@@ -653,6 +682,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -686,6 +716,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
i64,
i64);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -731,6 +763,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
i64,
i64);
@@ -743,6 +776,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -776,6 +810,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
i64,
i64);
@@ -788,6 +823,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -821,6 +857,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
i64,
i64);
@@ -834,6 +871,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -867,6 +905,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
i64,
i64);
@@ -880,6 +919,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -913,6 +953,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
i64,
i64);
@@ -926,6 +967,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -959,6 +1001,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
i64,
i64);
@@ -971,6 +1014,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -1004,6 +1048,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
i64,
i64);
@@ -1016,6 +1061,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -1049,6 +1095,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
i64,
i64);
@@ -1062,6 +1109,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -1095,6 +1143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
i64,
i64);
@@ -1108,6 +1157,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -1141,6 +1191,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
i64,
i64);
@@ -1154,6 +1205,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -1187,6 +1239,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1199,6 +1252,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1232,6 +1286,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1245,6 +1300,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1278,6 +1334,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1291,6 +1348,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1324,6 +1382,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1337,6 +1396,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -1409,6 +1470,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -1441,6 +1503,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -1474,6 +1537,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -1507,6 +1571,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -1540,6 +1605,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -1572,6 +1638,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -1604,6 +1671,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -1637,6 +1705,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -1670,6 +1739,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -1703,6 +1773,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -1735,6 +1806,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -1768,6 +1840,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -1801,6 +1874,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -1834,6 +1908,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
index 1fb5c651ebe0f..7d368f9a0ca2a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
@@ -150,6 +157,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -183,6 +191,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
@@ -196,6 +205,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
@@ -242,6 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
@@ -287,6 +300,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
@@ -332,6 +347,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
@@ -378,6 +395,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
@@ -424,6 +443,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -457,6 +477,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
@@ -470,6 +491,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -503,6 +525,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
@@ -515,6 +538,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -548,6 +572,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
@@ -561,6 +586,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -594,6 +620,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
@@ -607,6 +634,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -640,6 +668,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
@@ -653,6 +682,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -686,6 +716,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
i32,
i32);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -731,6 +763,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
i32,
i32);
@@ -743,6 +776,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -776,6 +810,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
i32,
i32);
@@ -788,6 +823,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -821,6 +857,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
i32,
i32);
@@ -834,6 +871,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -867,6 +905,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
i32,
i32);
@@ -880,6 +919,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -913,6 +953,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
i32,
i32);
@@ -926,6 +967,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -959,6 +1001,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
i32,
i32);
@@ -971,6 +1014,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1004,6 +1048,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1016,6 +1061,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1049,6 +1095,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1062,6 +1109,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1095,6 +1143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1108,6 +1157,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1141,6 +1191,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1154,6 +1205,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1187,6 +1239,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1199,6 +1252,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1232,6 +1286,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1245,6 +1300,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1278,6 +1334,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1291,6 +1348,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1324,6 +1382,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1337,6 +1396,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -1409,6 +1470,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -1441,6 +1503,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -1474,6 +1537,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -1507,6 +1571,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -1540,6 +1605,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -1572,6 +1638,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -1604,6 +1671,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -1637,6 +1705,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -1670,6 +1739,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -1703,6 +1773,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -1735,6 +1806,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
@@ -1768,6 +1840,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
@@ -1801,6 +1874,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
@@ -1834,6 +1908,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
index 0fd55a75c5cf5..4a41fe987c2d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
@@ -150,6 +157,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -183,6 +191,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
@@ -196,6 +205,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
@@ -242,6 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
@@ -287,6 +300,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
@@ -332,6 +347,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
@@ -378,6 +395,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
@@ -424,6 +443,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -457,6 +477,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
@@ -470,6 +491,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -503,6 +525,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
@@ -515,6 +538,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -548,6 +572,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
@@ -561,6 +586,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -594,6 +620,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
@@ -607,6 +634,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -640,6 +668,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
@@ -653,6 +682,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -686,6 +716,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
i64,
i64);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -731,6 +763,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
i64,
i64);
@@ -743,6 +776,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -776,6 +810,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
i64,
i64);
@@ -788,6 +823,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -821,6 +857,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
i64,
i64);
@@ -834,6 +871,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -867,6 +905,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
i64,
i64);
@@ -880,6 +919,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -913,6 +953,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
i64,
i64);
@@ -926,6 +967,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -959,6 +1001,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
i64,
i64);
@@ -971,6 +1014,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -1004,6 +1048,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
i64,
i64);
@@ -1016,6 +1061,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -1049,6 +1095,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
i64,
i64);
@@ -1062,6 +1109,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -1095,6 +1143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
i64,
i64);
@@ -1108,6 +1157,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -1141,6 +1191,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
i64,
i64);
@@ -1154,6 +1205,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -1187,6 +1239,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1199,6 +1252,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1232,6 +1286,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1245,6 +1300,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1278,6 +1334,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1291,6 +1348,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1324,6 +1382,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1337,6 +1396,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -1409,6 +1470,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -1441,6 +1503,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -1474,6 +1537,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -1507,6 +1571,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -1540,6 +1605,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -1572,6 +1638,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -1604,6 +1671,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -1637,6 +1705,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -1670,6 +1739,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -1703,6 +1773,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -1735,6 +1806,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -1768,6 +1840,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -1801,6 +1874,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -1834,6 +1908,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
index 123dfbe13d77a..e29de52f45ab5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
@@ -150,6 +157,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -183,6 +191,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
@@ -196,6 +205,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
@@ -242,6 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
@@ -287,6 +300,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
@@ -332,6 +347,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
@@ -378,6 +395,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
@@ -424,6 +443,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -457,6 +477,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
@@ -470,6 +491,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -503,6 +525,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
@@ -515,6 +538,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -548,6 +572,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
@@ -561,6 +586,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -594,6 +620,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
@@ -607,6 +634,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -640,6 +668,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
@@ -653,6 +682,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -686,6 +716,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
i32,
i32);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -731,6 +763,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
i32,
i32);
@@ -743,6 +776,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -776,6 +810,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
i32,
i32);
@@ -788,6 +823,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -821,6 +857,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
i32,
i32);
@@ -834,6 +871,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -867,6 +905,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
i32,
i32);
@@ -880,6 +919,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_vx_nxv16i8_nxv16i16(<vscale x 16 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -913,6 +953,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
i32,
i32);
@@ -926,6 +967,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_vx_nxv32i8_nxv32i16(<vscale x 32 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -959,6 +1001,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
i32,
i32);
@@ -971,6 +1014,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_vx_nxv1i16_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1004,6 +1048,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1016,6 +1061,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_vx_nxv2i16_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1049,6 +1095,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1062,6 +1109,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_vx_nxv4i16_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1095,6 +1143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1108,6 +1157,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_vx_nxv8i16_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1141,6 +1191,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1154,6 +1205,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_vx_nxv16i16_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1187,6 +1239,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1199,6 +1252,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_vx_nxv1i32_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1232,6 +1286,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1245,6 +1300,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_vx_nxv2i32_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1278,6 +1334,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1291,6 +1348,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_vx_nxv4i32_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1324,6 +1382,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1337,6 +1396,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_vx_nxv8i32_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -1409,6 +1470,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -1441,6 +1503,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -1474,6 +1537,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -1507,6 +1571,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -1540,6 +1605,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -1572,6 +1638,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -1604,6 +1671,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -1637,6 +1705,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -1670,6 +1739,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -1703,6 +1773,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -1735,6 +1806,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
@@ -1768,6 +1840,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
@@ -1801,6 +1874,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
@@ -1834,6 +1908,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
index b6ad65065adbd..c9686b059b9cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
@@ -150,6 +157,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -183,6 +191,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
@@ -196,6 +205,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
@@ -242,6 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
@@ -287,6 +300,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
@@ -332,6 +347,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
@@ -378,6 +395,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
@@ -424,6 +443,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -457,6 +477,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
@@ -470,6 +491,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -503,6 +525,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
@@ -515,6 +538,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -548,6 +572,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
@@ -561,6 +586,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -594,6 +620,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
@@ -607,6 +634,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -640,6 +668,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
@@ -653,6 +682,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -686,6 +716,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
i64,
i64);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -731,6 +763,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
i64,
i64);
@@ -743,6 +776,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -776,6 +810,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
i64,
i64);
@@ -788,6 +823,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -821,6 +857,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
i64,
i64);
@@ -834,6 +871,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -867,6 +905,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
i64,
i64);
@@ -880,6 +919,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_vx_nxv16i8_nxv16i16(<vscale x 16 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -913,6 +953,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
i64,
i64);
@@ -926,6 +967,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_vx_nxv32i8_nxv32i16(<vscale x 32 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -959,6 +1001,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
i64,
i64);
@@ -971,6 +1014,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_vx_nxv1i16_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -1004,6 +1048,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
i64,
i64);
@@ -1016,6 +1061,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_vx_nxv2i16_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -1049,6 +1095,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
i64,
i64);
@@ -1062,6 +1109,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_vx_nxv4i16_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -1095,6 +1143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
i64,
i64);
@@ -1108,6 +1157,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_vx_nxv8i16_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -1141,6 +1191,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
i64,
i64);
@@ -1154,6 +1205,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_vx_nxv16i16_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -1187,6 +1239,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1199,6 +1252,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_vx_nxv1i32_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1232,6 +1286,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1245,6 +1300,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_vx_nxv2i32_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1278,6 +1334,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1291,6 +1348,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_vx_nxv4i32_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1324,6 +1382,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1337,6 +1396,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_vx_nxv8i32_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -1409,6 +1470,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -1441,6 +1503,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -1474,6 +1537,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -1507,6 +1571,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -1540,6 +1605,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -1572,6 +1638,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -1604,6 +1671,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -1637,6 +1705,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -1670,6 +1739,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -1703,6 +1773,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -1735,6 +1806,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -1768,6 +1840,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -1801,6 +1874,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -1834,6 +1908,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
index 065203a3fa0d4..1d8338610a19a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
@@ -150,6 +157,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -183,6 +191,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
@@ -196,6 +205,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
@@ -242,6 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
@@ -287,6 +300,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
@@ -332,6 +347,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
@@ -378,6 +395,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
@@ -424,6 +443,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -457,6 +477,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
@@ -470,6 +491,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -503,6 +525,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
@@ -515,6 +538,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -548,6 +572,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
@@ -561,6 +586,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -594,6 +620,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
@@ -607,6 +634,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -640,6 +668,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
@@ -653,6 +682,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -686,6 +716,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
i32,
i32);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -731,6 +763,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
i32,
i32);
@@ -743,6 +776,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -776,6 +810,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
i32,
i32);
@@ -788,6 +823,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -821,6 +857,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
i32,
i32);
@@ -834,6 +871,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -867,6 +905,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
i32,
i32);
@@ -880,6 +919,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_vx_nxv16i8_nxv16i16(<vscale x 16 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -913,6 +953,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
i32,
i32);
@@ -926,6 +967,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_vx_nxv32i8_nxv32i16(<vscale x 32 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -959,6 +1001,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
i32,
i32);
@@ -971,6 +1014,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_vx_nxv1i16_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1004,6 +1048,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1016,6 +1061,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_vx_nxv2i16_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1049,6 +1095,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1062,6 +1109,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_vx_nxv4i16_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1095,6 +1143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1108,6 +1157,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_vx_nxv8i16_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1141,6 +1191,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1154,6 +1205,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_vx_nxv16i16_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1187,6 +1239,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1199,6 +1252,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_vx_nxv1i32_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1232,6 +1286,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1245,6 +1300,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_vx_nxv2i32_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1278,6 +1334,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1291,6 +1348,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_vx_nxv4i32_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1324,6 +1382,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1337,6 +1396,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_vx_nxv8i32_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -1409,6 +1470,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -1441,6 +1503,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -1474,6 +1537,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -1507,6 +1571,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -1540,6 +1605,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -1572,6 +1638,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -1604,6 +1671,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -1637,6 +1705,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -1670,6 +1739,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -1703,6 +1773,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -1735,6 +1806,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
@@ -1768,6 +1840,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
@@ -1801,6 +1874,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
@@ -1834,6 +1908,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
index 8f8a89a85e821..942e9cc7670fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
@@ -150,6 +157,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -183,6 +191,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
@@ -196,6 +205,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -229,6 +239,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
@@ -242,6 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -275,6 +287,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
@@ -287,6 +300,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -320,6 +334,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
@@ -332,6 +347,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -365,6 +381,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
@@ -378,6 +395,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -411,6 +429,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
@@ -424,6 +443,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -457,6 +477,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
@@ -470,6 +491,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -503,6 +525,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
@@ -515,6 +538,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -548,6 +572,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
@@ -561,6 +586,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -594,6 +620,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
@@ -607,6 +634,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -640,6 +668,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
@@ -653,6 +682,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -686,6 +716,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
<vscale x 1 x i16>,
i64,
i64);
@@ -698,6 +729,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -731,6 +763,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
<vscale x 2 x i16>,
i64,
i64);
@@ -743,6 +776,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -776,6 +810,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
<vscale x 4 x i16>,
i64,
i64);
@@ -788,6 +823,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -821,6 +857,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
<vscale x 8 x i16>,
i64,
i64);
@@ -834,6 +871,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -867,6 +905,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
<vscale x 16 x i16>,
i64,
i64);
@@ -880,6 +919,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_vx_nxv16i8_nxv16i16(<vscale x 16 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -913,6 +953,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
<vscale x 32 x i16>,
i64,
i64);
@@ -926,6 +967,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_vx_nxv32i8_nxv32i16(<vscale x 32 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -959,6 +1001,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i32>,
i64,
i64);
@@ -971,6 +1014,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_vx_nxv1i16_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -1004,6 +1048,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i32>,
i64,
i64);
@@ -1016,6 +1061,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_vx_nxv2i16_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -1049,6 +1095,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i32>,
i64,
i64);
@@ -1062,6 +1109,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_vx_nxv4i16_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -1095,6 +1143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i32>,
i64,
i64);
@@ -1108,6 +1157,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_vx_nxv8i16_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -1141,6 +1191,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i32>,
i64,
i64);
@@ -1154,6 +1205,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_vx_nxv16i16_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -1187,6 +1239,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1199,6 +1252,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_vx_nxv1i32_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1232,6 +1286,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1245,6 +1300,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_vx_nxv2i32_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1278,6 +1334,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1291,6 +1348,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_vx_nxv4i32_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1324,6 +1382,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1337,6 +1396,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_vx_nxv8i32_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1377,6 +1437,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -1409,6 +1470,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -1441,6 +1503,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -1474,6 +1537,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -1507,6 +1571,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -1540,6 +1605,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -1572,6 +1638,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -1604,6 +1671,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -1637,6 +1705,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -1670,6 +1739,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -1703,6 +1773,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -1735,6 +1806,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -1768,6 +1840,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -1801,6 +1874,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -1834,6 +1908,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
index ab10d1ce9c3ad..3068585d78c70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i32 %1)
@@ -2073,6 +2162,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i32 %1)
@@ -2105,6 +2195,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i32 %1)
@@ -2137,6 +2228,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i32 %1)
@@ -2169,6 +2261,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i32 %1)
@@ -2201,6 +2294,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i32 %1)
@@ -2233,6 +2327,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i32 %1)
@@ -2265,6 +2360,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i32 %1)
@@ -2297,6 +2393,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i32 %1)
@@ -2329,6 +2426,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i32 %1)
@@ -2361,6 +2459,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i32 %1)
@@ -2393,6 +2492,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i32 %1)
@@ -2425,6 +2525,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i32 %1)
@@ -2457,6 +2558,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2489,6 +2591,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2521,6 +2624,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2553,6 +2657,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2585,6 +2690,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2617,6 +2723,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
@@ -2649,6 +2756,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
@@ -2681,6 +2789,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
@@ -2713,6 +2822,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
index d30bf628ad030..57be344004e28 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
index e9a24bcee7d69..54b8d8808b7c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
index 100b79bde6258..4c894190474a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
index 6b3cf9df77ca6..1e5cdb83f3d83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
index 9b5c03b51f0b4..543868cfbf155 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
index 75e05a90a06cb..e529362ceab55 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -291,6 +304,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -325,6 +339,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -338,6 +353,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -371,6 +387,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -384,6 +401,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -417,6 +435,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -430,6 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -463,6 +483,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -476,6 +497,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -509,6 +531,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -522,6 +545,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -555,6 +579,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -568,6 +593,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -602,6 +628,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -615,6 +642,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -648,6 +676,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -661,6 +690,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -694,6 +724,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -707,6 +738,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -740,6 +772,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -753,6 +786,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -786,6 +820,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -799,6 +834,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -833,6 +869,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i32(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i16>,
i32);
@@ -846,6 +883,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i32(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -879,6 +917,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i32(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i16>,
i32);
@@ -892,6 +931,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i32(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -925,6 +965,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i32(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i16>,
i32);
@@ -938,6 +979,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i32(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -971,6 +1013,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i32(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i16>,
i32);
@@ -984,6 +1027,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i32(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -1017,6 +1061,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i32(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i16>,
i32);
@@ -1030,6 +1075,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i32(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -1063,6 +1109,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i32(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x i16>,
i32);
@@ -1076,6 +1123,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i32(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -1110,6 +1158,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i32>,
i32);
@@ -1123,6 +1172,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -1156,6 +1206,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i32>,
i32);
@@ -1169,6 +1220,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -1202,6 +1254,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i32>,
i32);
@@ -1215,6 +1268,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -1248,6 +1302,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i32>,
i32);
@@ -1261,6 +1316,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -1294,6 +1350,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x i32>,
i32);
@@ -1307,6 +1364,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -1341,6 +1399,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i64>,
i32);
@@ -1354,6 +1413,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -1387,6 +1447,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i64>,
i32);
@@ -1400,6 +1461,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -1433,6 +1495,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i64>,
i32);
@@ -1446,6 +1509,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -1479,6 +1543,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x i64>,
i32);
@@ -1492,6 +1557,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -1526,6 +1592,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
@@ -1539,6 +1606,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 %1,
i32 %2)
@@ -1572,6 +1640,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
@@ -1585,6 +1654,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 %1,
i32 %2)
@@ -1618,6 +1688,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
@@ -1631,6 +1702,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 %1,
i32 %2)
@@ -1664,6 +1736,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
@@ -1677,6 +1750,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 %1,
i32 %2)
@@ -1710,6 +1784,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
@@ -1723,6 +1798,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 %1,
i32 %2)
@@ -1756,6 +1832,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
@@ -1769,6 +1846,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 %1,
i32 %2)
@@ -1802,6 +1880,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i32,
i32);
@@ -1815,6 +1894,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 %1,
i32 %2)
@@ -1848,6 +1928,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
@@ -1861,6 +1942,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -1894,6 +1976,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
@@ -1907,6 +1990,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -1940,6 +2024,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
@@ -1953,6 +2038,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -1986,6 +2072,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
@@ -1999,6 +2086,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -2032,6 +2120,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
@@ -2045,6 +2134,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -2078,6 +2168,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i32,
i32);
@@ -2091,6 +2182,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -2124,6 +2216,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -2137,6 +2230,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -2170,6 +2264,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -2183,6 +2278,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -2216,6 +2312,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -2229,6 +2326,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -2262,6 +2360,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -2275,6 +2374,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -2308,6 +2408,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -2321,6 +2422,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -2354,6 +2456,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
i32,
i32);
@@ -2367,6 +2470,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
i32 %1,
i32 %2)
@@ -2400,6 +2504,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
i32,
i32);
@@ -2413,6 +2518,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
i32 %1,
i32 %2)
@@ -2446,6 +2552,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
i32,
i32);
@@ -2459,6 +2566,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
i32 %1,
i32 %2)
@@ -2492,6 +2600,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
i32,
i32);
@@ -2505,6 +2614,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
i32 %1,
i32 %2)
@@ -2538,6 +2648,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
i32,
i32);
@@ -2551,6 +2662,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
i32 %1,
i32 %2)
@@ -2584,6 +2696,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
i32,
i32);
@@ -2597,6 +2710,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
i32 %1,
i32 %2)
@@ -2630,6 +2744,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
i32,
i32);
@@ -2643,6 +2758,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
i32 %1,
i32 %2)
@@ -2676,6 +2792,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
i32,
i32);
@@ -2689,6 +2806,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
i32 %1,
i32 %2)
@@ -2722,6 +2840,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
i32,
i32);
@@ -2735,6 +2854,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
i32 %1,
i32 %2)
@@ -2768,6 +2888,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
i32,
i32);
@@ -2781,6 +2902,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
i32 %1,
i32 %2)
@@ -2814,6 +2936,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
i32,
i32);
@@ -2827,6 +2950,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
i32 %1,
i32 %2)
@@ -2860,6 +2984,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
i32,
i32);
@@ -2873,6 +2998,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
i32 %1,
i32 %2)
@@ -2906,6 +3032,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
i32,
i32);
@@ -2919,6 +3046,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
i32 %1,
i32 %2)
@@ -2952,6 +3080,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
i32,
i32);
@@ -2965,6 +3094,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
i32 %1,
i32 %2)
@@ -2998,6 +3128,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
i32,
i32);
@@ -3011,6 +3142,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
i32 %1,
i32 %2)
@@ -3052,6 +3184,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 9,
i32 %1)
@@ -3085,6 +3218,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 9,
i32 %1)
@@ -3118,6 +3252,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 9,
i32 %1)
@@ -3151,6 +3286,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 9,
i32 %1)
@@ -3184,6 +3320,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 9,
i32 %1)
@@ -3217,6 +3354,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 9,
i32 %1)
@@ -3250,6 +3388,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 9,
i32 %1)
@@ -3283,6 +3422,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -3316,6 +3456,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -3349,6 +3490,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -3382,6 +3524,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -3415,6 +3558,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -3448,6 +3592,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -3481,6 +3626,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -3514,6 +3660,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -3547,6 +3694,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -3580,6 +3728,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -3613,6 +3762,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -3646,6 +3796,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
i32 9,
i32 %1)
@@ -3679,6 +3830,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
i32 9,
i32 %1)
@@ -3712,6 +3864,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
i32 9,
i32 %1)
@@ -3745,6 +3898,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
i32 9,
i32 %1)
@@ -3778,6 +3932,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
i32 9,
i32 %1)
@@ -3811,6 +3966,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
i32 9,
i32 %1)
@@ -3844,6 +4000,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
i32 9,
i32 %1)
@@ -3877,6 +4034,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
i32 9,
i32 %1)
@@ -3910,6 +4068,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
i32 9,
i32 %1)
@@ -3943,6 +4102,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
i32 9,
i32 %1)
@@ -3976,6 +4136,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
i32 9,
i32 %1)
@@ -4009,6 +4170,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
i32 9,
i32 %1)
@@ -4042,6 +4204,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
i32 9,
i32 %1)
@@ -4075,6 +4238,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
i32 9,
i32 %1)
@@ -4108,6 +4272,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
index a15e7ba69e385..3a9ab1e28cd45 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -291,6 +304,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -325,6 +339,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -338,6 +353,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -371,6 +387,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -384,6 +401,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -417,6 +435,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -430,6 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -463,6 +483,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -476,6 +497,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -509,6 +531,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -522,6 +545,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -555,6 +579,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -568,6 +593,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -602,6 +628,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -615,6 +642,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -648,6 +676,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -661,6 +690,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -694,6 +724,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -707,6 +738,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -740,6 +772,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -753,6 +786,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -786,6 +820,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -799,6 +834,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -833,6 +869,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -846,6 +883,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -879,6 +917,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -892,6 +931,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -925,6 +965,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -938,6 +979,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -971,6 +1013,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -984,6 +1027,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -1018,6 +1062,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i16>,
i64);
@@ -1031,6 +1076,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -1064,6 +1110,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i16>,
i64);
@@ -1077,6 +1124,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -1110,6 +1158,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i16>,
i64);
@@ -1123,6 +1172,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -1156,6 +1206,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i16>,
i64);
@@ -1169,6 +1220,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -1202,6 +1254,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i16>,
i64);
@@ -1215,6 +1268,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -1248,6 +1302,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x i16>,
i64);
@@ -1261,6 +1316,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -1295,6 +1351,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i32>,
i64);
@@ -1308,6 +1365,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -1341,6 +1399,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i32>,
i64);
@@ -1354,6 +1413,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -1387,6 +1447,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i32>,
i64);
@@ -1400,6 +1461,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -1433,6 +1495,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i32>,
i64);
@@ -1446,6 +1509,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -1479,6 +1543,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x i32>,
i64);
@@ -1492,6 +1557,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -1526,6 +1592,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i64>,
i64);
@@ -1539,6 +1606,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -1572,6 +1640,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i64>,
i64);
@@ -1585,6 +1654,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -1618,6 +1688,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i64>,
i64);
@@ -1631,6 +1702,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -1664,6 +1736,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x i64>,
i64);
@@ -1677,6 +1750,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -1711,6 +1785,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
@@ -1724,6 +1799,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 %1,
i64 %2)
@@ -1757,6 +1833,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
@@ -1770,6 +1847,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 %1,
i64 %2)
@@ -1803,6 +1881,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
@@ -1816,6 +1895,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 %1,
i64 %2)
@@ -1849,6 +1929,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
@@ -1862,6 +1943,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 %1,
i64 %2)
@@ -1895,6 +1977,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
@@ -1908,6 +1991,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2025,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
@@ -1954,6 +2039,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 %1,
i64 %2)
@@ -1987,6 +2073,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i64,
i64);
@@ -2000,6 +2087,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 %1,
i64 %2)
@@ -2033,6 +2121,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
@@ -2046,6 +2135,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -2079,6 +2169,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
@@ -2092,6 +2183,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -2125,6 +2217,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
@@ -2138,6 +2231,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -2171,6 +2265,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
@@ -2184,6 +2279,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -2217,6 +2313,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
@@ -2230,6 +2327,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -2263,6 +2361,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i64,
i64);
@@ -2276,6 +2375,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -2309,6 +2409,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
@@ -2322,6 +2423,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -2355,6 +2457,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
@@ -2368,6 +2471,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -2401,6 +2505,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
@@ -2414,6 +2519,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -2447,6 +2553,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
@@ -2460,6 +2567,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -2493,6 +2601,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i64,
i64);
@@ -2506,6 +2615,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -2539,6 +2649,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -2552,6 +2663,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -2585,6 +2697,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -2598,6 +2711,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -2631,6 +2745,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -2644,6 +2759,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -2677,6 +2793,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -2690,6 +2807,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -2723,6 +2841,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
i64,
i64);
@@ -2736,6 +2855,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
i64 %1,
i64 %2)
@@ -2769,6 +2889,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
i64,
i64);
@@ -2782,6 +2903,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
i64 %1,
i64 %2)
@@ -2815,6 +2937,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
i64,
i64);
@@ -2828,6 +2951,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
i64 %1,
i64 %2)
@@ -2861,6 +2985,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
i64,
i64);
@@ -2874,6 +2999,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
i64 %1,
i64 %2)
@@ -2907,6 +3033,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
i64,
i64);
@@ -2920,6 +3047,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
i64 %1,
i64 %2)
@@ -2953,6 +3081,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
i64,
i64);
@@ -2966,6 +3095,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
i64 %1,
i64 %2)
@@ -2999,6 +3129,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
i64,
i64);
@@ -3012,6 +3143,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
i64 %1,
i64 %2)
@@ -3045,6 +3177,7 @@ entry:
}
declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
+ <vscale x 2 x float>,
<vscale x 2 x float>,
i64,
i64);
@@ -3058,6 +3191,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
i64 %1,
i64 %2)
@@ -3091,6 +3225,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
i64,
i64);
@@ -3104,6 +3239,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
i64 %1,
i64 %2)
@@ -3137,6 +3273,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
i64,
i64);
@@ -3150,6 +3287,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
i64 %1,
i64 %2)
@@ -3183,6 +3321,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
i64,
i64);
@@ -3196,6 +3335,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
i64 %1,
i64 %2)
@@ -3229,6 +3369,7 @@ entry:
}
declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
i64,
i64);
@@ -3242,6 +3383,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
i64 %1,
i64 %2)
@@ -3275,6 +3417,7 @@ entry:
}
declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
+ <vscale x 2 x double>,
<vscale x 2 x double>,
i64,
i64);
@@ -3288,6 +3431,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
i64 %1,
i64 %2)
@@ -3321,6 +3465,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
i64,
i64);
@@ -3334,6 +3479,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
i64 %1,
i64 %2)
@@ -3367,6 +3513,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
i64,
i64);
@@ -3380,6 +3527,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
i64 %1,
i64 %2)
@@ -3421,6 +3569,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 9,
i64 %1)
@@ -3454,6 +3603,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 9,
i64 %1)
@@ -3487,6 +3637,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 9,
i64 %1)
@@ -3520,6 +3671,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 9,
i64 %1)
@@ -3553,6 +3705,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 9,
i64 %1)
@@ -3586,6 +3739,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 9,
i64 %1)
@@ -3619,6 +3773,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 9,
i64 %1)
@@ -3652,6 +3807,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -3685,6 +3841,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -3718,6 +3875,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -3751,6 +3909,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -3784,6 +3943,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -3817,6 +3977,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -3850,6 +4011,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -3883,6 +4045,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -3916,6 +4079,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -3949,6 +4113,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -3982,6 +4147,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -4015,6 +4181,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -4048,6 +4215,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -4081,6 +4249,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -4114,6 +4283,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
@@ -4147,6 +4317,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
i64 9,
i64 %1)
@@ -4180,6 +4351,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
i64 9,
i64 %1)
@@ -4213,6 +4385,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
i64 9,
i64 %1)
@@ -4246,6 +4419,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
i64 9,
i64 %1)
@@ -4279,6 +4453,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
i64 9,
i64 %1)
@@ -4312,6 +4487,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
i64 9,
i64 %1)
@@ -4345,6 +4521,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
i64 9,
i64 %1)
@@ -4378,6 +4555,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
i64 9,
i64 %1)
@@ -4411,6 +4589,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
i64 9,
i64 %1)
@@ -4444,6 +4623,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
i64 9,
i64 %1)
@@ -4477,6 +4657,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
i64 9,
i64 %1)
@@ -4510,6 +4691,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
i64 9,
i64 %1)
@@ -4543,6 +4725,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
i64 9,
i64 %1)
@@ -4576,6 +4759,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
i64 9,
i64 %1)
@@ -4609,6 +4793,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
index 720d6d8585fdd..522c617097425 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i16>,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i16>,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i8> @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i16>,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i8> @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i16>,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i8> @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i16>,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i8> @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i16>,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i8> @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -291,6 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -337,6 +352,7 @@ define <vscale x 2 x i16> @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -383,6 +400,7 @@ define <vscale x 4 x i16> @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -429,6 +448,7 @@ define <vscale x 8 x i16> @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -475,6 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -521,6 +544,7 @@ define <vscale x 32 x i16> @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -555,6 +579,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
@@ -568,6 +593,7 @@ define <vscale x 1 x i32> @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -601,6 +627,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
@@ -614,6 +641,7 @@ define <vscale x 4 x i32> @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -647,6 +675,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
@@ -660,6 +689,7 @@ define <vscale x 8 x i32> @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -693,6 +723,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
@@ -706,6 +737,7 @@ define <vscale x 16 x i32> @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -740,6 +772,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i16>,
i32);
@@ -753,6 +786,7 @@ define <vscale x 4 x i64> @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -786,6 +820,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i16>,
i32);
@@ -799,6 +834,7 @@ define <vscale x 8 x i64> @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -833,6 +869,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i16>,
i32);
@@ -846,6 +883,7 @@ define <vscale x 1 x half> @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -879,6 +917,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i16>,
i32);
@@ -892,6 +931,7 @@ define <vscale x 2 x half> @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -925,6 +965,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i16>,
i32);
@@ -938,6 +979,7 @@ define <vscale x 4 x half> @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -971,6 +1013,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i16>,
i32);
@@ -984,6 +1027,7 @@ define <vscale x 8 x half> @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -1017,6 +1061,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i16>,
i32);
@@ -1030,6 +1075,7 @@ define <vscale x 16 x half> @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -1063,6 +1109,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x i16>,
i32);
@@ -1076,6 +1123,7 @@ define <vscale x 32 x half> @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -1110,6 +1158,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i16>,
i32);
@@ -1123,6 +1172,7 @@ define <vscale x 1 x float> @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -1156,6 +1206,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i16>,
i32);
@@ -1169,6 +1220,7 @@ define <vscale x 4 x float> @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -1202,6 +1254,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i16>,
i32);
@@ -1215,6 +1268,7 @@ define <vscale x 8 x float> @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -1248,6 +1302,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x i16>,
i32);
@@ -1261,6 +1316,7 @@ define <vscale x 16 x float> @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -1295,6 +1351,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i16>,
i32);
@@ -1308,6 +1365,7 @@ define <vscale x 4 x double> @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -1341,6 +1399,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x i16>,
i32);
@@ -1354,6 +1413,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x i16> %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
index 9592630c19781..c31e6e68bc7b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i16>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i16>,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i8> @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i16>,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i8> @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i16>,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i8> @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i16>,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i8> @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i16>,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i8> @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -291,6 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -337,6 +352,7 @@ define <vscale x 2 x i16> @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -383,6 +400,7 @@ define <vscale x 4 x i16> @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -429,6 +448,7 @@ define <vscale x 8 x i16> @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -475,6 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -521,6 +544,7 @@ define <vscale x 32 x i16> @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -555,6 +579,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
@@ -568,6 +593,7 @@ define <vscale x 1 x i32> @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -601,6 +627,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
@@ -614,6 +641,7 @@ define <vscale x 4 x i32> @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -647,6 +675,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
@@ -660,6 +689,7 @@ define <vscale x 8 x i32> @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -693,6 +723,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
@@ -706,6 +737,7 @@ define <vscale x 16 x i32> @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -740,6 +772,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i16>,
i64);
@@ -753,6 +786,7 @@ define <vscale x 4 x i64> @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -786,6 +820,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i16>,
i64);
@@ -799,6 +834,7 @@ define <vscale x 8 x i64> @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -833,6 +869,7 @@ entry:
}
declare <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
+ <vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i16>,
i64);
@@ -846,6 +883,7 @@ define <vscale x 1 x half> @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -879,6 +917,7 @@ entry:
}
declare <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
+ <vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i16>,
i64);
@@ -892,6 +931,7 @@ define <vscale x 2 x half> @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -925,6 +965,7 @@ entry:
}
declare <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
+ <vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i16>,
i64);
@@ -938,6 +979,7 @@ define <vscale x 4 x half> @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -971,6 +1013,7 @@ entry:
}
declare <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
+ <vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i16>,
i64);
@@ -984,6 +1027,7 @@ define <vscale x 8 x half> @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -1017,6 +1061,7 @@ entry:
}
declare <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
+ <vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i16>,
i64);
@@ -1030,6 +1075,7 @@ define <vscale x 16 x half> @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -1063,6 +1109,7 @@ entry:
}
declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
+ <vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x i16>,
i64);
@@ -1076,6 +1123,7 @@ define <vscale x 32 x half> @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -1110,6 +1158,7 @@ entry:
}
declare <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
+ <vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i16>,
i64);
@@ -1123,6 +1172,7 @@ define <vscale x 1 x float> @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -1156,6 +1206,7 @@ entry:
}
declare <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
+ <vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i16>,
i64);
@@ -1169,6 +1220,7 @@ define <vscale x 4 x float> @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -1202,6 +1254,7 @@ entry:
}
declare <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
+ <vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i16>,
i64);
@@ -1215,6 +1268,7 @@ define <vscale x 8 x float> @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -1248,6 +1302,7 @@ entry:
}
declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
+ <vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x i16>,
i64);
@@ -1261,6 +1316,7 @@ define <vscale x 16 x float> @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -1295,6 +1351,7 @@ entry:
}
declare <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
+ <vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i16>,
i64);
@@ -1308,6 +1365,7 @@ define <vscale x 4 x double> @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -1341,6 +1399,7 @@ entry:
}
declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
+ <vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x i16>,
i64);
@@ -1354,6 +1413,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
<vscale x 8 x i16> %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
index bd00557309afd..422d326e248da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -317,6 +331,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -329,6 +344,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -362,6 +378,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -374,6 +391,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -407,6 +425,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -419,6 +438,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -452,6 +472,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -464,6 +485,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -497,6 +519,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -509,6 +532,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -542,6 +566,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -554,6 +579,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -587,6 +613,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -599,6 +626,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -632,6 +660,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -644,6 +673,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -677,6 +707,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -689,6 +720,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -722,6 +754,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -734,6 +767,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -767,6 +801,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -779,6 +814,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -812,6 +848,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -830,6 +867,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -869,6 +907,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -887,6 +926,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -926,6 +966,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -944,6 +985,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -983,6 +1025,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1001,6 +1044,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
@@ -1047,6 +1091,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i32 %1)
@@ -1079,6 +1124,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i32 %1)
@@ -1111,6 +1157,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i32 %1)
@@ -1143,6 +1190,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i32 %1)
@@ -1175,6 +1223,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i32 %1)
@@ -1207,6 +1256,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i32 %1)
@@ -1239,6 +1289,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i32 %1)
@@ -1271,6 +1322,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i32 %1)
@@ -1303,6 +1355,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i32 %1)
@@ -1335,6 +1388,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i32 %1)
@@ -1367,6 +1421,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i32 %1)
@@ -1399,6 +1454,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i32 %1)
@@ -1431,6 +1487,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i32 %1)
@@ -1463,6 +1520,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -1495,6 +1553,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -1527,6 +1586,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -1559,6 +1619,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -1591,6 +1652,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -1623,6 +1685,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
@@ -1655,6 +1718,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
@@ -1687,6 +1751,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
@@ -1719,6 +1784,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
index fe930ea9dcce7..372a7cbd560da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -317,6 +331,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -329,6 +344,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -362,6 +378,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -374,6 +391,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -407,6 +425,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -419,6 +438,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -452,6 +472,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -464,6 +485,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -497,6 +519,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -509,6 +532,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -542,6 +566,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -554,6 +579,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -587,6 +613,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -599,6 +626,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -632,6 +660,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -644,6 +673,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -677,6 +707,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -689,6 +720,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -722,6 +754,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -734,6 +767,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -767,6 +801,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -779,6 +814,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -812,6 +848,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -824,6 +861,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -857,6 +895,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -869,6 +908,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -902,6 +942,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -914,6 +955,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -947,6 +989,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -959,6 +1002,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -999,6 +1043,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i64 %1)
@@ -1031,6 +1076,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i64 %1)
@@ -1063,6 +1109,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i64 %1)
@@ -1095,6 +1142,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i64 %1)
@@ -1127,6 +1175,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i64 %1)
@@ -1159,6 +1208,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i64 %1)
@@ -1191,6 +1241,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i64 %1)
@@ -1223,6 +1274,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i64 %1)
@@ -1255,6 +1307,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i64 %1)
@@ -1287,6 +1340,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i64 %1)
@@ -1319,6 +1373,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i64 %1)
@@ -1351,6 +1406,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i64 %1)
@@ -1383,6 +1439,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i64 %1)
@@ -1415,6 +1472,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i64 %1)
@@ -1447,6 +1505,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i64 %1)
@@ -1479,6 +1538,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i64 %1)
@@ -1511,6 +1571,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i64 %1)
@@ -1543,6 +1604,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i64 %1)
@@ -1575,6 +1637,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -1607,6 +1670,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -1639,6 +1703,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -1671,6 +1736,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
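
(Note for readers skimming the hunks: every nomask test in this patch follows the same mechanical update. The intrinsic declaration gains a leading passthru operand of the result vector type, and each call site passes undef for it. The sketch below is illustrative only, not a hunk from the patch; the value names %v, %x and %vl are made up for readability:

  declare <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
    <vscale x 2 x i32>,   ; new leading passthru operand
    <vscale x 2 x i32>,   ; vector source operand
    i32,                  ; scalar source operand
    i64);                 ; vl

  %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %v,
    i32 %x,
    i64 %vl)
)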
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
index 968fbf120aa3c..2961c0db836c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i32 %1)
@@ -2073,6 +2162,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i32 %1)
@@ -2105,6 +2195,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i32 %1)
@@ -2137,6 +2228,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i32 %1)
@@ -2169,6 +2261,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i32 %1)
@@ -2201,6 +2294,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i32 %1)
@@ -2233,6 +2327,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i32 %1)
@@ -2265,6 +2360,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i32 %1)
@@ -2297,6 +2393,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i32 %1)
@@ -2329,6 +2426,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i32 %1)
@@ -2361,6 +2459,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i32 %1)
@@ -2393,6 +2492,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i32 %1)
@@ -2425,6 +2525,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i32 %1)
@@ -2457,6 +2558,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2489,6 +2591,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2521,6 +2624,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2553,6 +2657,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2585,6 +2690,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2617,6 +2723,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
@@ -2649,6 +2756,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
@@ -2681,6 +2789,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
@@ -2713,6 +2822,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
index b1cf6b22b0f97..4042be68d6592 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
index c57a590eb3888..9c8492f816d36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i32 %1)
@@ -2073,6 +2162,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i32 %1)
@@ -2105,6 +2195,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i32 %1)
@@ -2137,6 +2228,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i32 %1)
@@ -2169,6 +2261,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i32 %1)
@@ -2201,6 +2294,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i32 %1)
@@ -2233,6 +2327,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i32 %1)
@@ -2265,6 +2360,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i32 %1)
@@ -2297,6 +2393,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i32 %1)
@@ -2329,6 +2426,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i32 %1)
@@ -2361,6 +2459,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i32 %1)
@@ -2393,6 +2492,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i32 %1)
@@ -2425,6 +2525,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i32 %1)
@@ -2457,6 +2558,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2489,6 +2591,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2521,6 +2624,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2553,6 +2657,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2585,6 +2690,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2617,6 +2723,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
@@ -2649,6 +2756,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
@@ -2681,6 +2789,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
@@ -2713,6 +2822,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
index 991d230237757..10efea7664219 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 5c0eb6be8ddaf..5d0c898e43691 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -7,12 +7,12 @@
declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i64)
-declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64)
-declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
-declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double, i64)
declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float, i64)
@@ -37,11 +37,11 @@ entry:
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.else: ; preds = %entry
- %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.end: ; preds = %if.else, %if.then
@@ -70,16 +70,16 @@ entry:
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.else: ; preds = %entry
- %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.end: ; preds = %if.else, %if.then
%c.0 = phi <vscale x 1 x double> [ %1, %if.then ], [ %2, %if.else ]
- %3 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> %c.0, <vscale x 1 x double> %a, i64 %0)
+ %3 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %c.0, <vscale x 1 x double> %a, i64 %0)
ret <vscale x 1 x double> %3
}
@@ -103,18 +103,18 @@ entry:
if.then: ; preds = %entry
%0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
- %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.else: ; preds = %entry
%2 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
- %3 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %2)
+ %3 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %2)
br label %if.end
if.end: ; preds = %if.else, %if.then
%vl.0 = phi i64 [ %0, %if.then], [ %2, %if.else ]
%c.0 = phi <vscale x 1 x double> [ %1, %if.then ], [ %3, %if.else ]
- %4 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> %c.0, <vscale x 1 x double> %a, i64 %vl.0)
+ %4 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %c.0, <vscale x 1 x double> %a, i64 %vl.0)
ret <vscale x 1 x double> %4
}
@@ -158,7 +158,7 @@ entry:
if.then: ; preds = %entry
%0 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 1.000000e+00, i64 %avl)
%1 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 2.000000e+00, i64 %avl)
- %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %avl)
+ %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %avl)
%3 = bitcast i8* @scratch to <vscale x 1 x double>*
tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %2, <vscale x 1 x double>* %3, i64 %avl)
br label %if.end
@@ -166,13 +166,13 @@ if.then: ; preds = %entry
if.else: ; preds = %entry
%4 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 1.000000e+00, i64 %avl)
%5 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 2.000000e+00, i64 %avl)
- %6 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> %4, <vscale x 2 x float> %5, i64 %avl)
+ %6 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %4, <vscale x 2 x float> %5, i64 %avl)
%7 = bitcast i8* @scratch to <vscale x 2 x float>*
tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %6, <vscale x 2 x float>* %7, i64 %avl)
br label %if.end
if.end: ; preds = %if.else, %if.then
- %8 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> %l, <vscale x 1 x double> %r, i64 %avl)
+ %8 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %l, <vscale x 1 x double> %r, i64 %avl)
ret <vscale x 1 x double> %8
}
@@ -204,11 +204,11 @@ entry:
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.else: ; preds = %entry
- %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.end: ; preds = %if.else, %if.then
@@ -218,11 +218,11 @@ if.end: ; preds = %if.else, %if.then
br i1 %tobool3, label %if.else5, label %if.then4
if.then4: ; preds = %if.end
- %3 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> %c.0, <vscale x 1 x double> %a, i64 %0)
+ %3 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %c.0, <vscale x 1 x double> %a, i64 %0)
br label %if.end6
if.else5: ; preds = %if.end
- %4 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %c.0, i64 %0)
+ %4 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %c.0, i64 %0)
br label %if.end6
if.end6: ; preds = %if.else5, %if.then4
@@ -284,11 +284,11 @@ entry:
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.else: ; preds = %entry
- %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.end: ; preds = %if.else, %if.then
@@ -301,7 +301,7 @@ if.then4: ; preds = %if.end
%3 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
%4 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 1.000000e+00, i64 %3)
%5 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 2.000000e+00, i64 %3)
- %6 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %4, <vscale x 1 x double> %5, i64 %3)
+ %6 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %4, <vscale x 1 x double> %5, i64 %3)
%7 = bitcast i8* @scratch to <vscale x 1 x double>*
tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %6, <vscale x 1 x double>* %7, i64 %3)
br label %if.end10
@@ -310,13 +310,13 @@ if.else5: ; preds = %if.end
%8 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 0)
%9 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 1.000000e+00, i64 %8)
%10 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 2.000000e+00, i64 %8)
- %11 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> %9, <vscale x 2 x float> %10, i64 %8)
+ %11 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %9, <vscale x 2 x float> %10, i64 %8)
%12 = bitcast i8* @scratch to <vscale x 2 x float>*
tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %11, <vscale x 2 x float>* %12, i64 %8)
br label %if.end10
if.end10: ; preds = %if.else5, %if.then4
- %13 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> %c.0, <vscale x 1 x double> %c.0, i64 %0)
+ %13 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %c.0, <vscale x 1 x double> %c.0, i64 %0)
ret <vscale x 1 x double> %13
}
@@ -368,12 +368,12 @@ entry:
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.else: ; preds = %entry
call void @foo()
- %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.end: ; preds = %if.else, %if.then
@@ -428,17 +428,17 @@ entry:
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
call void @foo()
br label %if.end
if.else: ; preds = %entry
- %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+ %2 = tail call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
br label %if.end
if.end: ; preds = %if.else, %if.then
%c.0 = phi <vscale x 1 x double> [ %1, %if.then ], [ %2, %if.else ]
- %3 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> %c.0, <vscale x 1 x double> %a, i64 %0)
+ %3 = tail call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %c.0, <vscale x 1 x double> %a, i64 %0)
ret <vscale x 1 x double> %3
}
@@ -520,18 +520,18 @@ entry:
if:
%b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %y, i64 %vl)
- %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i16> %b, i16 0, i64 %vl)
+ %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i16> %b, i16 0, i64 %vl)
br label %if.end
if.end:
%d = phi <vscale x 2 x i32> [ %z, %entry ], [ %c, %if ]
- %e = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %d, i64 %vl)
+ %e = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %a, <vscale x 2 x i32> %d, i64 %vl)
ret <vscale x 2 x i32> %e
}
declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64)
declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, i64)
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i16>, i16, i64)
-declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i64)
+declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i16>, i16, i64)
+declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64)
; We can use X0, X0 vsetvli in if2 and if2.end. The merge point as if.end will
; see two different vtypes with the same SEW/LMUL ratio. At if2.end we will only
@@ -566,7 +566,7 @@ entry:
if:
%b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %y, i64 %vl)
- %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32> %a, <vscale x 2 x i16> %b, i64 %vl)
+ %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> %a, <vscale x 2 x i16> %b, i64 %vl)
br label %if.end
if.end:
@@ -575,12 +575,12 @@ if.end:
if2:
%e = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %z, i64 %vl)
- %f = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32> %d, <vscale x 2 x i16> %e, i64 %vl)
+ %f = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> %d, <vscale x 2 x i16> %e, i64 %vl)
br label %if2.end
if2.end:
%g = phi <vscale x 2 x i32> [ %d, %if.end ], [ %f, %if2 ]
- %h = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32> %g, <vscale x 2 x i32> %w, i64 %vl)
+ %h = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %g, <vscale x 2 x i32> %w, i64 %vl)
ret <vscale x 2 x i32> %h
}
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, i64)
+declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i16>, i64)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index 1cb41692a8cc8..489e43c5db6fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -15,11 +15,11 @@
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
br label %if.end
if.else: ; preds = %entry
- %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
+ %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
br label %if.end
if.end: ; preds = %if.else, %if.then
@@ -56,11 +56,11 @@
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
br label %if.end
if.else: ; preds = %entry
- %b = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %1, i64 %2)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %1, i64 %2)
br label %if.end
if.end: ; preds = %if.else, %if.then
@@ -79,11 +79,11 @@
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
br label %if.end
if.else: ; preds = %entry
- %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
+ %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
br label %if.end
if.end: ; preds = %if.else, %if.then
@@ -125,10 +125,10 @@
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
; Function Attrs: nounwind readnone
- declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
+ declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
; Function Attrs: nounwind readnone
- declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
+ declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
; Function Attrs: nounwind readonly
declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index e708f5b75639e..f08cf4f840947 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -5,6 +5,7 @@
declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
declare i64 @llvm.riscv.vsetvlimax(i64, i64)
declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
i64)
@@ -23,6 +24,7 @@ define <vscale x 1 x double> @test1(i64 %avl, <vscale x 1 x double> %a, <vscale
entry:
%0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
%1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %a,
<vscale x 1 x double> %b,
i64 %0)
@@ -38,6 +40,7 @@ define <vscale x 1 x double> @test2(i64 %avl, <vscale x 1 x double> %a, <vscale
entry:
%0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
%1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %a,
<vscale x 1 x double> %b,
i64 %avl)
@@ -247,6 +250,7 @@ define <vscale x 1 x double> @test13(<vscale x 1 x double> %a, <vscale x 1 x dou
; CHECK-NEXT: ret
entry:
%0 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %a,
<vscale x 1 x double> %b,
i64 -1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index 4aa61e0fd5fce..d7a49aaab4c70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -10,14 +10,14 @@
define <vscale x 1 x i64> @add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @load_add(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
- %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
ret <vscale x 1 x i64> %b
}
@@ -60,7 +60,7 @@
define <vscale x 1 x i64> @vsetvli_add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
entry:
%a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0)
- %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %a)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %a)
ret <vscale x 1 x i64> %b
}
@@ -68,12 +68,12 @@
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
call void asm sideeffect "", ""()
- %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
ret <vscale x 1 x i64> %b
}
; Function Attrs: nounwind readnone
- declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
+ declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
; Function Attrs: nounwind readonly
declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
index a9a9ea8019b0e..d01e7a6a006cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -317,6 +331,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -329,6 +344,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -362,6 +378,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -374,6 +391,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -407,6 +425,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -419,6 +438,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -452,6 +472,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -464,6 +485,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -497,6 +519,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -509,6 +532,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -542,6 +566,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -554,6 +579,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -587,6 +613,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -599,6 +626,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -632,6 +660,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -644,6 +673,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -677,6 +707,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -689,6 +720,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -722,6 +754,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -734,6 +767,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -767,6 +801,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -779,6 +814,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -812,6 +848,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -826,6 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -863,6 +901,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -877,6 +916,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -914,6 +954,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -928,6 +969,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -965,6 +1007,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -979,6 +1022,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
index 2110f21bf13d7..696c12a9fd6de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -317,6 +331,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -329,6 +344,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -362,6 +378,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -374,6 +391,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -407,6 +425,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -419,6 +438,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -452,6 +472,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -464,6 +485,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -497,6 +519,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -509,6 +532,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -542,6 +566,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -554,6 +579,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -587,6 +613,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -599,6 +626,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -632,6 +660,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -644,6 +673,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -677,6 +707,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -689,6 +720,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -722,6 +754,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -734,6 +767,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -767,6 +801,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -779,6 +814,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -812,6 +848,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -824,6 +861,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -857,6 +895,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -869,6 +908,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -902,6 +942,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -914,6 +955,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -947,6 +989,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -959,6 +1002,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
index c5b0bb3c07a29..1209956e7161a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -291,6 +304,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -337,6 +352,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -383,6 +400,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -429,6 +448,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -475,6 +496,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -521,6 +544,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -567,6 +592,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -613,6 +640,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -659,6 +688,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -705,6 +736,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -751,6 +784,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -797,6 +832,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -844,6 +881,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -881,6 +919,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -895,6 +934,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -932,6 +972,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -946,6 +987,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -983,6 +1025,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -997,6 +1040,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
index 81c16b9c315cd..a0f9476845ef7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -291,6 +304,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -337,6 +352,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -383,6 +400,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -429,6 +448,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -475,6 +496,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -521,6 +544,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -567,6 +592,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -613,6 +640,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -659,6 +688,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -705,6 +736,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -751,6 +784,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -797,6 +832,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -843,6 +880,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -889,6 +928,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -935,6 +976,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -981,6 +1024,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
index cd141520ce540..019b40e37ef27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i32,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i32,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 9,
i32 %1)
@@ -2008,6 +2097,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 1,
i32 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 9,
i32 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 9,
i32 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 9,
i32 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 9,
i32 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 9,
i32 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 9,
i32 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
@@ -2697,6 +2807,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
index 305f8712b0f43..da88d4f73cfe9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i64,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i64,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i64,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 9,
i64 %1)
@@ -2008,6 +2097,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 1,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2697,6 +2807,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
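(For readers skimming the repeated hunks: every nomask vsll test above gains the same new leading operand. Below is a minimal standalone sketch of the updated .vv shape; the function name @sketch_vsll_vv is made up for illustration and is not part of the patch.)

declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; new passthru operand (these nomask tests pass undef)
  <vscale x 1 x i8>,   ; op1
  <vscale x 1 x i8>,   ; op2 (vector shift amount)
  i64)                 ; vl (i64 on rv64)

define <vscale x 1 x i8> @sketch_vsll_vv(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i64 %vl) {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %op1,
    <vscale x 1 x i8> %op2,
    i64 %vl)
  ret <vscale x 1 x i8> %a
}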
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
index be3a442cc61b7..50a08f0c60a1b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
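(The scalar .vx form follows the same pattern. A sketch for rv32, where vl is i32 and the second source is a scalar i8; the function name @sketch_vsmul_vx is invented for illustration only.)

declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
  <vscale x 1 x i8>,   ; new passthru operand (these nomask tests pass undef)
  <vscale x 1 x i8>,   ; op1
  i8,                  ; scalar op2
  i32)                 ; vl (i32 on rv32)

define <vscale x 1 x i8> @sketch_vsmul_vx(<vscale x 1 x i8> %op1, i8 %s, i32 %vl) {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %op1,
    i8 %s,
    i32 %vl)
  ret <vscale x 1 x i8> %a
}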
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
index 3a5eb3c18f2b6..5380ff997c059 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
index 78eac5fcc9ec7..24f8d8ae572f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i32,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i32,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 9,
i32 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 9,
i32 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 9,
i32 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 9,
i32 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 9,
i32 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 9,
i32 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 9,
i32 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
index 4841ca02e382b..435d59d5aa55b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i64,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i64,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i64,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
index 8ae5ec19ccda0..0d68ab09c4b14 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i32,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i32,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 9,
i32 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 9,
i32 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 9,
i32 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 9,
i32 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 9,
i32 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 9,
i32 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 9,
i32 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
index 054d81a54db17..270f5ca83b62c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i64,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i64,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i64,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
index 9cb0c4803370c..622d1e9e3244c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 %1,
i32 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
@@ -1007,6 +1052,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 %1,
i32 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
@@ -1052,6 +1099,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 %1,
i32 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i32,
i32);
@@ -1097,6 +1146,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 %1,
i32 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
@@ -1142,6 +1193,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
@@ -1187,6 +1240,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
@@ -1232,6 +1287,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
@@ -1277,6 +1334,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
@@ -1322,6 +1381,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -1355,6 +1415,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i32,
i32);
@@ -1367,6 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -1400,6 +1462,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1412,6 +1475,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1445,6 +1509,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1457,6 +1522,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1490,6 +1556,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1502,6 +1569,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1535,6 +1603,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1547,6 +1616,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1580,6 +1650,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1592,6 +1663,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1625,6 +1697,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1637,6 +1710,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1670,6 +1744,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1682,6 +1757,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1715,6 +1791,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1727,6 +1804,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1760,6 +1838,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1772,6 +1851,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1812,6 +1892,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 9,
i32 %1)
@@ -1844,6 +1925,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 9,
i32 %1)
@@ -1876,6 +1958,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 9,
i32 %1)
@@ -1908,6 +1991,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 9,
i32 %1)
@@ -1940,6 +2024,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 9,
i32 %1)
@@ -1972,6 +2057,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 9,
i32 %1)
@@ -2004,6 +2090,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 9,
i32 %1)
@@ -2036,6 +2123,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -2068,6 +2156,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -2100,6 +2189,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -2132,6 +2222,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -2164,6 +2255,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -2196,6 +2288,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -2228,6 +2321,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2260,6 +2354,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2292,6 +2387,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2324,6 +2420,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2356,6 +2453,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
index b39bdb4bdda64..621d8a59b4010 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i64,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i64,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i64,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
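
The test updates in these files all follow the same pattern: each nomask intrinsic gains a leading vector operand of the result type, and the regression tests pass undef for it while the remaining operands (source vector, shift amount or scalar/immediate, and vl) are unchanged. As a rough self-contained sketch of the updated call shape only (the function and value names @vssra_passthru_sketch, %pt, %x, %sh and %vl are illustrative and are not part of this patch), a caller of the new 4-operand vssra.vv form on rv32 would look like:

declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,  ; passthru operand added by this change
  <vscale x 1 x i8>,  ; source vector
  <vscale x 1 x i8>,  ; per-element shift amount
  i32);               ; vl

define <vscale x 1 x i8> @vssra_passthru_sketch(<vscale x 1 x i8> %pt, <vscale x 1 x i8> %x, <vscale x 1 x i8> %sh, i32 %vl) {
entry:
  ; Same call shape as the tests above, but with a non-undef first operand.
  %r = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %pt,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %sh,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}

The masked variants and the vssrl/vsrl files that follow use the same operand layout, differing only in the intrinsic name and element types.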
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
index 173f2f863d55b..bac9d515b0adf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 %1,
i32 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
@@ -1007,6 +1052,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 %1,
i32 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
@@ -1052,6 +1099,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 %1,
i32 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i32,
i32);
@@ -1097,6 +1146,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 %1,
i32 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
@@ -1142,6 +1193,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 %1,
i32 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
@@ -1187,6 +1240,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 %1,
i32 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
@@ -1232,6 +1287,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 %1,
i32 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
@@ -1277,6 +1334,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 %1,
i32 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
@@ -1322,6 +1381,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 %1,
i32 %2)
@@ -1355,6 +1415,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i32,
i32);
@@ -1367,6 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 %1,
i32 %2)
@@ -1400,6 +1462,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1412,6 +1475,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1445,6 +1509,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1457,6 +1522,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1490,6 +1556,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1502,6 +1569,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1535,6 +1603,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1547,6 +1616,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1580,6 +1650,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1592,6 +1663,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1625,6 +1697,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1637,6 +1710,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1670,6 +1744,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1682,6 +1757,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1715,6 +1791,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1727,6 +1804,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1760,6 +1838,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1772,6 +1851,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1812,6 +1892,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i32 9,
i32 %1)
@@ -1844,6 +1925,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i32 9,
i32 %1)
@@ -1876,6 +1958,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i32 9,
i32 %1)
@@ -1908,6 +1991,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i32 9,
i32 %1)
@@ -1940,6 +2024,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i32 9,
i32 %1)
@@ -1972,6 +2057,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i32 9,
i32 %1)
@@ -2004,6 +2090,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i32 9,
i32 %1)
@@ -2036,6 +2123,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i32 9,
i32 %1)
@@ -2068,6 +2156,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i32 9,
i32 %1)
@@ -2100,6 +2189,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i32 9,
i32 %1)
@@ -2132,6 +2222,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i32 9,
i32 %1)
@@ -2164,6 +2255,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i32 9,
i32 %1)
@@ -2196,6 +2288,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i32 9,
i32 %1)
@@ -2228,6 +2321,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2260,6 +2354,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2292,6 +2387,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2324,6 +2420,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2356,6 +2453,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
index 3bc2f68ebb94a..98da6798eca6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i64,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i64,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i64,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i64 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i64 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i64 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i64 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i64 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i64 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i64 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i64 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i64 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i64 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i64 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i64 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i64 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i64 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i64 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i64 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i64 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i64 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
index c695923f56a06..d8c62715bef94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
index bff302a14b36b..5c328c26d614c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
index 2b0fcf54963c7..6eab93f84539d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
index 0169d78b4a798..2f054417e7a7a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
index a9896a305516f..4f0bff3562ace 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i32 %1)
@@ -2073,6 +2162,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i32 %1)
@@ -2105,6 +2195,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i32 %1)
@@ -2137,6 +2228,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i32 %1)
@@ -2169,6 +2261,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i32 %1)
@@ -2201,6 +2294,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i32 %1)
@@ -2233,6 +2327,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 -9,
i32 %1)
@@ -2265,6 +2360,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i32 %1)
@@ -2297,6 +2393,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i32 %1)
@@ -2329,6 +2426,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i32 %1)
@@ -2361,6 +2459,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i32 %1)
@@ -2393,6 +2492,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i32 %1)
@@ -2425,6 +2525,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i32 %1)
@@ -2457,6 +2558,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2489,6 +2591,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2521,6 +2624,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2553,6 +2657,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2585,6 +2690,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2617,6 +2723,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
@@ -2649,6 +2756,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
@@ -2681,6 +2789,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
@@ -2713,6 +2822,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
index 4840fa6030363..2cb7f359dcf0d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
index 72f559952e294..4c2c1a9aa8ec3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i32);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i32);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i32);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i32);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i32);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i32);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i32);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
index 348d2b32b65c7..722349fed6613 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i64);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i64);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i64);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i64);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i64);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i64);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i64);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
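Taken together, the vwadd-rv64.ll hunks above amount to roughly the following shape for the nxv1i16 vector-vector test once the patch is applied. This is only a sketch assembled from the fragments shown; the define parameter list, the attribute-free function header, and the final ret are inferred from the call operands rather than quoted from the file:
declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64);
define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) {
entry:
  ; The new leading operand has the same type as the result and is passed as
  ; undef in these updated nomask tests.
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)
  ret <vscale x 1 x i16> %a
}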
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
index 6fab280f8f9b1..b11f800551c30 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -273,6 +285,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
@@ -285,6 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
@@ -511,6 +534,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
@@ -556,6 +581,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -680,6 +710,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
i32);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i8 %1,
i32 %2)
@@ -725,6 +757,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
i32);
@@ -737,6 +770,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i8 %1,
i32 %2)
@@ -770,6 +804,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
i32);
@@ -782,6 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i8 %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
i32);
@@ -827,6 +864,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i8 %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
i32);
@@ -872,6 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i8 %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
i32);
@@ -917,6 +958,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i8 %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i16 %1,
i32 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
i32);
@@ -1007,6 +1052,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i16 %1,
i32 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
i32);
@@ -1052,6 +1099,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i16 %1,
i32 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
i32);
@@ -1097,6 +1146,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i16 %1,
i32 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
i32);
@@ -1142,6 +1193,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i16 %1,
i32 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1187,6 +1240,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1232,6 +1287,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1277,6 +1334,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1322,6 +1381,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %0,
i32 %2)
@@ -1889,6 +1950,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %0,
i32 %2)
@@ -1905,6 +1967,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %0,
i32 %2)
@@ -1921,6 +1984,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %0,
i32 %2)
@@ -1937,6 +2001,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %0,
i32 %2)
@@ -1953,6 +2018,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %0,
i32 %2)
@@ -1969,6 +2035,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %0,
i32 %2)
@@ -1985,6 +2052,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %0,
i32 %2)
@@ -2001,6 +2069,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %0,
i32 %2)
@@ -2017,6 +2086,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %0,
i32 %2)
@@ -2033,6 +2103,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %0,
i32 %2)
@@ -2049,6 +2120,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %0,
i32 %2)
@@ -2065,6 +2137,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %0,
i32 %2)
@@ -2081,6 +2154,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %0,
i32 %2)
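For the vwadd.w form the first source operand is already of the wide type, so after this change the intrinsic carries two operands of the result type back to back: the new leading operand followed by the wide source. A sketch of the nxv1i16 wv test from vwadd.w-rv32.ll above, with the function header and ret inferred from the call rather than quoted from the file:
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i32);
define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) {
entry:
  ; First operand: new undef passthru; second operand: the wide (i16) source.
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  ret <vscale x 1 x i16> %a
}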
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
index 65481d304ddf4..6e753d8b139de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -273,6 +285,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
@@ -285,6 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
@@ -511,6 +534,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
@@ -556,6 +581,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -680,6 +710,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
i64);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i8 %1,
i64 %2)
@@ -725,6 +757,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
i64);
@@ -737,6 +770,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i8 %1,
i64 %2)
@@ -770,6 +804,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
i64);
@@ -782,6 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i8 %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
i64);
@@ -827,6 +864,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i8 %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
i64);
@@ -872,6 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i8 %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
i64);
@@ -917,6 +958,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i8 %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i16 %1,
i64 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
i64);
@@ -1007,6 +1052,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i16 %1,
i64 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
i64);
@@ -1052,6 +1099,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i16 %1,
i64 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
i64);
@@ -1097,6 +1146,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i16 %1,
i64 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
i64);
@@ -1142,6 +1193,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i16 %1,
i64 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i64);
@@ -1187,6 +1240,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i64 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i64);
@@ -1232,6 +1287,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i64 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i64);
@@ -1277,6 +1334,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i64 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i64);
@@ -1322,6 +1381,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i64 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %0,
i64 %2)
@@ -1889,6 +1950,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %0,
i64 %2)
@@ -1905,6 +1967,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %0,
i64 %2)
@@ -1921,6 +1984,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %0,
i64 %2)
@@ -1937,6 +2001,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %0,
i64 %2)
@@ -1953,6 +2018,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %0,
i64 %2)
@@ -1969,6 +2035,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %0,
i64 %2)
@@ -1985,6 +2052,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %0,
i64 %2)
@@ -2001,6 +2069,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %0,
i64 %2)
@@ -2017,6 +2086,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %0,
i64 %2)
@@ -2033,6 +2103,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %0,
i64 %2)
@@ -2049,6 +2120,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %0,
i64 %2)
@@ -2065,6 +2137,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %0,
i64 %2)
@@ -2081,6 +2154,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %0,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
index f37d6ed898679..a8c0d0c1ff389 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i32);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i32);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i32);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i32);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i32);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i32);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i32);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
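The vector-scalar (vx) form follows the same pattern; only the second source is a scalar and, in this riscv32 file, the vl operand is i32. A sketch of the first vwaddu vx test from vwaddu-rv32.ll above, again with the define signature and ret inferred from the call operands:
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i8,
  i32);
define <vscale x 1 x i16> @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) {
entry:
  ; The leading undef operand is the only change relative to the old form.
  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 %2)
  ret <vscale x 1 x i16> %a
}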
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
index 6c546a504b017..d5acb1347fcf4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i64);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i64);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i64);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i64);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i64);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i64);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i64);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
index 1128135e33ab4..ff0c070a7b5c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -273,6 +285,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
@@ -285,6 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
@@ -511,6 +534,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
@@ -556,6 +581,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -680,6 +710,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
i32);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i8 %1,
i32 %2)
@@ -725,6 +757,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
i32);
@@ -737,6 +770,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i8 %1,
i32 %2)
@@ -770,6 +804,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
i32);
@@ -782,6 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i8 %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
i32);
@@ -827,6 +864,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i8 %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
i32);
@@ -872,6 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i8 %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
i32);
@@ -917,6 +958,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i8 %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i16 %1,
i32 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
i32);
@@ -1007,6 +1052,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i16 %1,
i32 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
i32);
@@ -1052,6 +1099,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i16 %1,
i32 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
i32);
@@ -1097,6 +1146,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i16 %1,
i32 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
i32);
@@ -1142,6 +1193,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i16 %1,
i32 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1187,6 +1240,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1232,6 +1287,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1277,6 +1334,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1322,6 +1381,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %0,
i32 %2)
@@ -1889,6 +1950,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %0,
i32 %2)
@@ -1905,6 +1967,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %0,
i32 %2)
@@ -1921,6 +1984,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %0,
i32 %2)
@@ -1937,6 +2001,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %0,
i32 %2)
@@ -1953,6 +2018,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %0,
i32 %2)
@@ -1969,6 +2035,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %0,
i32 %2)
@@ -1985,6 +2052,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %0,
i32 %2)
@@ -2001,6 +2069,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %0,
i32 %2)
@@ -2017,6 +2086,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %0,
i32 %2)
@@ -2033,6 +2103,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %0,
i32 %2)
@@ -2049,6 +2120,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %0,
i32 %2)
@@ -2065,6 +2137,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %0,
i32 %2)
@@ -2081,6 +2154,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %0,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
index e3840e33f81a8..486acc72cd9eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -273,6 +285,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
@@ -285,6 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
@@ -511,6 +534,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
@@ -556,6 +581,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -680,6 +710,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
i64);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i8 %1,
i64 %2)
@@ -725,6 +757,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
i64);
@@ -737,6 +770,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i8 %1,
i64 %2)
@@ -770,6 +804,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
i64);
@@ -782,6 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i8 %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
i64);
@@ -827,6 +864,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i8 %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
i64);
@@ -872,6 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i8 %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
i64);
@@ -917,6 +958,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i8 %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i16 %1,
i64 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
i64);
@@ -1007,6 +1052,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i16 %1,
i64 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
i64);
@@ -1052,6 +1099,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i16 %1,
i64 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
i64);
@@ -1097,6 +1146,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i16 %1,
i64 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
i64);
@@ -1142,6 +1193,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i16 %1,
i64 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i64);
@@ -1187,6 +1240,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i64 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i64);
@@ -1232,6 +1287,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i64 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i64);
@@ -1277,6 +1334,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i64 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i64);
@@ -1322,6 +1381,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i64 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %0,
i64 %2)
@@ -1889,6 +1950,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %0,
i64 %2)
@@ -1905,6 +1967,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %0,
i64 %2)
@@ -1921,6 +1984,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %0,
i64 %2)
@@ -1937,6 +2001,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %0,
i64 %2)
@@ -1953,6 +2018,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %0,
i64 %2)
@@ -1969,6 +2035,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %0,
i64 %2)
@@ -1985,6 +2052,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %0,
i64 %2)
@@ -2001,6 +2069,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %0,
i64 %2)
@@ -2017,6 +2086,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %0,
i64 %2)
@@ -2033,6 +2103,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %0,
i64 %2)
@@ -2049,6 +2120,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %0,
i64 %2)
@@ -2065,6 +2137,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %0,
i64 %2)
@@ -2081,6 +2154,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %0,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
index 338e4b352ddf3..1484ded004175 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i32);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i32);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i32);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i32);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i32);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i32);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i32);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
index da5accc0f1bd4..8d56ba9e557f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i64);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i64);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i64);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i64);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i64);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i64);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i64);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
index 4f480ebea8726..1edfa38956c19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i32);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i32);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i32);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i32);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i32);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i32);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i32);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
index da7d9b97bc0ac..f821a23f1f9e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i64);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i64);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i64);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i64);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i64);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i64);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i64);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
index 11ac17450a4cf..ce02e398e7fdc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i32);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i32);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i32);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i32);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i32);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i32);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i32);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
index e9adaec0bdc8c..ab57eb0e0bfc4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i64);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i64);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i64);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i64);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i64);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i64);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i64);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
index ef27184103d60..a37b6c08580f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i32);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i32);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i32);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i32);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i32);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i32);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i32);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
index 2de365ecb1218..bc1e468c93f83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i64);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i64);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i64);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i64);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i64);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i64);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i64);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
index 213cb69a9a6a2..a57271f6b854c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -273,6 +285,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
@@ -285,6 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
@@ -511,6 +534,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
@@ -556,6 +581,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -680,6 +710,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
i32);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i8 %1,
i32 %2)
@@ -725,6 +757,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
i32);
@@ -737,6 +770,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i8 %1,
i32 %2)
@@ -770,6 +804,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
i32);
@@ -782,6 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i8 %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
i32);
@@ -827,6 +864,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i8 %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
i32);
@@ -872,6 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i8 %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
i32);
@@ -917,6 +958,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i8 %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i16 %1,
i32 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
i32);
@@ -1007,6 +1052,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i16 %1,
i32 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
i32);
@@ -1052,6 +1099,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i16 %1,
i32 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
i32);
@@ -1097,6 +1146,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i16 %1,
i32 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
i32);
@@ -1142,6 +1193,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i16 %1,
i32 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1187,6 +1240,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1232,6 +1287,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1277,6 +1334,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1322,6 +1381,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %0,
i32 %2)
@@ -1889,6 +1950,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %0,
i32 %2)
@@ -1905,6 +1967,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %0,
i32 %2)
@@ -1921,6 +1984,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %0,
i32 %2)
@@ -1937,6 +2001,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %0,
i32 %2)
@@ -1953,6 +2018,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %0,
i32 %2)
@@ -1969,6 +2035,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %0,
i32 %2)
@@ -1985,6 +2052,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %0,
i32 %2)
@@ -2001,6 +2069,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %0,
i32 %2)
@@ -2017,6 +2086,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %0,
i32 %2)
@@ -2033,6 +2103,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %0,
i32 %2)
@@ -2049,6 +2120,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %0,
i32 %2)
@@ -2065,6 +2137,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %0,
i32 %2)
@@ -2081,6 +2154,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %0,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
index 6e8aff5e81593..f4abf11d4ac50 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -273,6 +285,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
@@ -285,6 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
@@ -511,6 +534,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
@@ -556,6 +581,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -680,6 +710,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
i64);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i8 %1,
i64 %2)
@@ -725,6 +757,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
i64);
@@ -737,6 +770,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i8 %1,
i64 %2)
@@ -770,6 +804,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
i64);
@@ -782,6 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i8 %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
i64);
@@ -827,6 +864,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i8 %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
i64);
@@ -872,6 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i8 %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
i64);
@@ -917,6 +958,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i8 %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i16 %1,
i64 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
i64);
@@ -1007,6 +1052,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i16 %1,
i64 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
i64);
@@ -1052,6 +1099,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i16 %1,
i64 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
i64);
@@ -1097,6 +1146,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i16 %1,
i64 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
i64);
@@ -1142,6 +1193,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i16 %1,
i64 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i64);
@@ -1187,6 +1240,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i64 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i64);
@@ -1232,6 +1287,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i64 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i64);
@@ -1277,6 +1334,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i64 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i64);
@@ -1322,6 +1381,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i64 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %0,
i64 %2)
@@ -1889,6 +1950,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %0,
i64 %2)
@@ -1905,6 +1967,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %0,
i64 %2)
@@ -1921,6 +1984,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %0,
i64 %2)
@@ -1937,6 +2001,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %0,
i64 %2)
@@ -1953,6 +2018,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %0,
i64 %2)
@@ -1969,6 +2035,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %0,
i64 %2)
@@ -1985,6 +2052,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %0,
i64 %2)
@@ -2001,6 +2069,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %0,
i64 %2)
@@ -2017,6 +2086,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %0,
i64 %2)
@@ -2033,6 +2103,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %0,
i64 %2)
@@ -2049,6 +2120,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %0,
i64 %2)
@@ -2065,6 +2137,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %0,
i64 %2)
@@ -2081,6 +2154,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %0,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
index 603f685d4c7e6..2109237527f44 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i32);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i32);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i32);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i32);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i32);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i32);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i32);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
index 3c4cc0283cc0d..dac6937f73750 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -48,6 +50,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -61,6 +64,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -94,6 +98,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -107,6 +112,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -140,6 +146,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -153,6 +160,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -186,6 +194,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -199,6 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -232,6 +242,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -245,6 +256,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -278,6 +290,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -291,6 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -324,6 +338,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -337,6 +352,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -370,6 +386,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -383,6 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -416,6 +434,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -429,6 +448,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -462,6 +482,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -475,6 +496,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -508,6 +530,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -521,6 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -554,6 +578,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -567,6 +592,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -600,6 +626,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -613,6 +640,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -646,6 +674,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -659,6 +688,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -692,6 +722,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
i64);
@@ -705,6 +736,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -738,6 +770,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
i64);
@@ -751,6 +784,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -784,6 +818,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
i64);
@@ -797,6 +832,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -830,6 +866,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
i64);
@@ -843,6 +880,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -876,6 +914,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
i64);
@@ -889,6 +928,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -922,6 +962,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
i64);
@@ -935,6 +976,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -968,6 +1010,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
i64);
@@ -981,6 +1024,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1014,6 +1058,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1027,6 +1072,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1060,6 +1106,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1073,6 +1120,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1106,6 +1154,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1119,6 +1168,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1152,6 +1202,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1165,6 +1216,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1198,6 +1250,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1211,6 +1264,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1244,6 +1298,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1257,6 +1312,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1290,6 +1346,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1303,6 +1360,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1336,6 +1394,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1349,6 +1408,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
index 1406f14de4422..3c7f59da442f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -273,6 +285,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
@@ -285,6 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
@@ -511,6 +534,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
@@ -556,6 +581,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -680,6 +710,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
i32);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i8 %1,
i32 %2)
@@ -725,6 +757,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
i32);
@@ -737,6 +770,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i8 %1,
i32 %2)
@@ -770,6 +804,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
i32);
@@ -782,6 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i8 %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
i32);
@@ -827,6 +864,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i8 %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
i32);
@@ -872,6 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i8 %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
i32);
@@ -917,6 +958,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i8 %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i16 %1,
i32 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
i32);
@@ -1007,6 +1052,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i16 %1,
i32 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
i32);
@@ -1052,6 +1099,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i16 %1,
i32 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
i32);
@@ -1097,6 +1146,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i16 %1,
i32 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
i32);
@@ -1142,6 +1193,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i16 %1,
i32 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
@@ -1187,6 +1240,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
@@ -1232,6 +1287,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
@@ -1277,6 +1334,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i32);
@@ -1322,6 +1381,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %0,
i32 %2)
@@ -1889,6 +1950,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %0,
i32 %2)
@@ -1905,6 +1967,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %0,
i32 %2)
@@ -1921,6 +1984,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %0,
i32 %2)
@@ -1937,6 +2001,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %0,
i32 %2)
@@ -1953,6 +2018,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %0,
i32 %2)
@@ -1969,6 +2035,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %0,
i32 %2)
@@ -1985,6 +2052,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %0,
i32 %2)
@@ -2001,6 +2069,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %0,
i32 %2)
@@ -2017,6 +2086,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %0,
i32 %2)
@@ -2033,6 +2103,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %0,
i32 %2)
@@ -2049,6 +2120,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %0,
i32 %2)
@@ -2065,6 +2137,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %0,
i32 %2)
@@ -2081,6 +2154,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %0,
i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
index d504b680f2a6f..4b47c25384262 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsc
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -273,6 +285,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
@@ -285,6 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vs
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -499,6 +521,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
@@ -511,6 +534,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -544,6 +568,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
@@ -556,6 +581,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -680,6 +710,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
i64);
@@ -692,6 +723,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i8 %1,
i64 %2)
@@ -725,6 +757,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
i64);
@@ -737,6 +770,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i8 %1,
i64 %2)
@@ -770,6 +804,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
i64);
@@ -782,6 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i8 %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
i64);
@@ -827,6 +864,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i8 %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
i64);
@@ -872,6 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i8 %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
i64);
@@ -917,6 +958,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i8 %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i16 %1,
i64 %2)
@@ -995,6 +1039,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
i64);
@@ -1007,6 +1052,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i16 %1,
i64 %2)
@@ -1040,6 +1086,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
i64);
@@ -1052,6 +1099,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i16 %1,
i64 %2)
@@ -1085,6 +1133,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
i64);
@@ -1097,6 +1146,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i16 %1,
i64 %2)
@@ -1130,6 +1180,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
i64);
@@ -1142,6 +1193,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i16 %1,
i64 %2)
@@ -1175,6 +1227,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i64);
@@ -1187,6 +1240,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i32 %1,
i64 %2)
@@ -1220,6 +1274,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i64);
@@ -1232,6 +1287,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i32 %1,
i64 %2)
@@ -1265,6 +1321,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i64);
@@ -1277,6 +1334,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i32 %1,
i64 %2)
@@ -1310,6 +1368,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
i64);
@@ -1322,6 +1381,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i32 %1,
i64 %2)
@@ -1873,6 +1933,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %0,
i64 %2)
@@ -1889,6 +1950,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %0,
i64 %2)
@@ -1905,6 +1967,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %0,
i64 %2)
@@ -1921,6 +1984,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<v
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %0,
i64 %2)
@@ -1937,6 +2001,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %0,
i64 %2)
@@ -1953,6 +2018,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %0,
i64 %2)
@@ -1969,6 +2035,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %0,
i64 %2)
@@ -1985,6 +2052,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %0,
i64 %2)
@@ -2001,6 +2069,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %0,
i64 %2)
@@ -2017,6 +2086,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %0,
i64 %2)
@@ -2033,6 +2103,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %0,
i64 %2)
@@ -2049,6 +2120,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %0,
i64 %2)
@@ -2065,6 +2137,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %0,
i64 %2)
@@ -2081,6 +2154,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %0,
i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
index 8da7f7acb2e62..0d006ca9d9f6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i32);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i32 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i32);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i32 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i32 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i32);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i32);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i32);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i32);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i32);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i32);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i32);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i32 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i32);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i32);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i32);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i32);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i32);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i32);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i32 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i32);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i32 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i32);
@@ -1824,6 +1905,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
@@ -1863,6 +1945,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i32);
@@ -1881,6 +1964,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
@@ -1920,6 +2004,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i32);
@@ -1938,6 +2023,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
@@ -1977,6 +2063,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i32);
@@ -1995,6 +2082,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
@@ -2041,6 +2129,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i32 %1)
@@ -2073,6 +2162,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i32 %1)
@@ -2105,6 +2195,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i32 %1)
@@ -2137,6 +2228,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i32 %1)
@@ -2169,6 +2261,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i32 %1)
@@ -2201,6 +2294,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i32 %1)
@@ -2233,6 +2327,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i32 %1)
@@ -2265,6 +2360,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i32 %1)
@@ -2297,6 +2393,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i32 %1)
@@ -2329,6 +2426,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i32 %1)
@@ -2361,6 +2459,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i32 %1)
@@ -2393,6 +2492,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i32 %1)
@@ -2425,6 +2525,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i32 %1)
@@ -2457,6 +2558,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i32 %1)
@@ -2489,6 +2591,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i32 %1)
@@ -2521,6 +2624,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i32 %1)
@@ -2553,6 +2657,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i32 %1)
@@ -2585,6 +2690,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i32 %1)
@@ -2617,6 +2723,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
@@ -2649,6 +2756,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
@@ -2681,6 +2789,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
@@ -2713,6 +2822,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
index 08c47cd6c17fe..45e21d13ed807 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64);
@@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2)
@@ -47,6 +49,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64);
@@ -59,6 +62,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2)
@@ -92,6 +96,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64);
@@ -104,6 +109,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2)
@@ -137,6 +143,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
@@ -149,6 +156,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
@@ -182,6 +190,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64);
@@ -194,6 +203,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2)
@@ -227,6 +237,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64);
@@ -239,6 +250,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2)
@@ -272,6 +284,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64);
@@ -284,6 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
i64 %2)
@@ -318,6 +332,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64);
@@ -330,6 +345,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2)
@@ -363,6 +379,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64);
@@ -375,6 +392,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2)
@@ -408,6 +426,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64);
@@ -420,6 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2)
@@ -453,6 +473,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64);
@@ -465,6 +486,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2)
@@ -498,6 +520,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64);
@@ -510,6 +533,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2)
@@ -543,6 +567,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i64);
@@ -555,6 +580,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i64 %2)
@@ -589,6 +615,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64);
@@ -601,6 +628,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2)
@@ -634,6 +662,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64);
@@ -646,6 +675,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2)
@@ -679,6 +709,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64);
@@ -691,6 +722,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2)
@@ -724,6 +756,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64);
@@ -736,6 +769,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2)
@@ -769,6 +803,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i64);
@@ -781,6 +816,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i64 %2)
@@ -815,6 +851,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64);
@@ -827,6 +864,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2)
@@ -860,6 +898,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64);
@@ -872,6 +911,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2)
@@ -905,6 +945,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64);
@@ -917,6 +958,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2)
@@ -950,6 +992,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64);
@@ -962,6 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2)
@@ -996,6 +1040,7 @@ entry:
}
declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
i64);
@@ -1008,6 +1053,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
i64 %2)
@@ -1041,6 +1087,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
i64);
@@ -1053,6 +1100,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
i64 %2)
@@ -1086,6 +1134,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
i64);
@@ -1098,6 +1147,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
i64 %2)
@@ -1131,6 +1181,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
i64);
@@ -1143,6 +1194,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
i64 %2)
@@ -1176,6 +1228,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
i64);
@@ -1188,6 +1241,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
i64 %2)
@@ -1221,6 +1275,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
i64);
@@ -1233,6 +1288,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
i64 %2)
@@ -1266,6 +1322,7 @@ entry:
}
declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
i64);
@@ -1278,6 +1335,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
i64 %2)
@@ -1311,6 +1369,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
i64);
@@ -1323,6 +1382,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
i64 %2)
@@ -1356,6 +1416,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
i64);
@@ -1368,6 +1429,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
i64 %2)
@@ -1401,6 +1463,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
i64);
@@ -1413,6 +1476,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
i64 %2)
@@ -1446,6 +1510,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
i64);
@@ -1458,6 +1523,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
i64 %2)
@@ -1491,6 +1557,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
i64);
@@ -1503,6 +1570,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
i64 %2)
@@ -1536,6 +1604,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
i64);
@@ -1548,6 +1617,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
i64 %2)
@@ -1581,6 +1651,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i64);
@@ -1593,6 +1664,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
i64 %2)
@@ -1626,6 +1698,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i64);
@@ -1638,6 +1711,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
i64 %2)
@@ -1671,6 +1745,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i64);
@@ -1683,6 +1758,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
i64 %2)
@@ -1716,6 +1792,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i64);
@@ -1728,6 +1805,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
i64 %2)
@@ -1761,6 +1839,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
i64);
@@ -1773,6 +1852,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
i64 %2)
@@ -1806,6 +1886,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
@@ -1818,6 +1899,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
i64 %2)
@@ -1851,6 +1933,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
@@ -1863,6 +1946,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
i64 %2)
@@ -1896,6 +1980,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
@@ -1908,6 +1993,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
i64 %2)
@@ -1941,6 +2027,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
i64);
@@ -1953,6 +2040,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
i64 %2)
@@ -1993,6 +2081,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
i64 %1)
@@ -2025,6 +2114,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
i64 %1)
@@ -2057,6 +2147,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
i64 %1)
@@ -2089,6 +2180,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
i64 %1)
@@ -2121,6 +2213,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
i64 %1)
@@ -2153,6 +2246,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
i64 %1)
@@ -2185,6 +2279,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
i64 %1)
@@ -2217,6 +2312,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
i64 %1)
@@ -2249,6 +2345,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
i64 %1)
@@ -2281,6 +2378,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
i64 %1)
@@ -2313,6 +2411,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
i64 %1)
@@ -2345,6 +2444,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
i64 %1)
@@ -2377,6 +2477,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
i64 %1)
@@ -2409,6 +2510,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
i64 %1)
@@ -2441,6 +2543,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
i64 %1)
@@ -2473,6 +2576,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
i64 %1)
@@ -2505,6 +2609,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
i64 %1)
@@ -2537,6 +2642,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
i64 %1)
@@ -2569,6 +2675,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
i64 %1)
@@ -2601,6 +2708,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
i64 %1)
@@ -2633,6 +2741,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
i64 %1)
@@ -2665,6 +2774,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
i64 %1)
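For reference, the shape of the change is identical in every hunk above: each nomask vxor declaration gains a leading passthru operand of the matching vector type, and each test call passes undef for it. Below is a minimal, self-contained .ll sketch of the updated call form; the function name @example_vxor_vx and the operand names are illustrative only and do not appear in the patch.

; Sketch only (not part of the patch): a standalone example of the updated
; nomask vxor.vx call form, matching the declarations in the tests above.
declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
  <vscale x 1 x i8>,   ; passthru (new leading operand)
  <vscale x 1 x i8>,   ; vector source operand
  i8,                  ; scalar operand
  i64)                 ; vl

define <vscale x 1 x i8> @example_vxor_vx(<vscale x 1 x i8> %op1, i8 %op2, i64 %vl) {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
    <vscale x 1 x i8> undef,   ; passthru, passed as undef as in the updated tests
    <vscale x 1 x i8> %op1,
    i8 %op2,
    i64 %vl)
  ret <vscale x 1 x i8> %a
}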